x86/mce: Handle Local MCE events
arch/x86/kernel/cpu/mcheck/mce.c
1 /*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/thread_info.h>
14 #include <linux/capability.h>
15 #include <linux/miscdevice.h>
16 #include <linux/ratelimit.h>
17 #include <linux/kallsyms.h>
18 #include <linux/rcupdate.h>
19 #include <linux/kobject.h>
20 #include <linux/uaccess.h>
21 #include <linux/kdebug.h>
22 #include <linux/kernel.h>
23 #include <linux/percpu.h>
24 #include <linux/string.h>
25 #include <linux/device.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/delay.h>
28 #include <linux/ctype.h>
29 #include <linux/sched.h>
30 #include <linux/sysfs.h>
31 #include <linux/types.h>
32 #include <linux/slab.h>
33 #include <linux/init.h>
34 #include <linux/kmod.h>
35 #include <linux/poll.h>
36 #include <linux/nmi.h>
37 #include <linux/cpu.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/mm.h>
41 #include <linux/debugfs.h>
42 #include <linux/irq_work.h>
43 #include <linux/export.h>
44
45 #include <asm/processor.h>
46 #include <asm/traps.h>
47 #include <asm/tlbflush.h>
48 #include <asm/mce.h>
49 #include <asm/msr.h>
50
51 #include "mce-internal.h"
52
53 static DEFINE_MUTEX(mce_chrdev_read_mutex);
54
55 #define rcu_dereference_check_mce(p) \
56 rcu_dereference_index_check((p), \
57 rcu_read_lock_sched_held() || \
58 lockdep_is_held(&mce_chrdev_read_mutex))
59
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/mce.h>
62
63 #define SPINUNIT 100 /* 100ns */
64
65 DEFINE_PER_CPU(unsigned, mce_exception_count);
66
67 struct mce_bank *mce_banks __read_mostly;
68 struct mce_vendor_flags mce_flags __read_mostly;
69
70 struct mca_config mca_cfg __read_mostly = {
71 .bootlog = -1,
72 /*
73 * Tolerant levels:
74 * 0: always panic on uncorrected errors, log corrected errors
75 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
76 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
77 * 3: never panic or SIGBUS, log all errors (for testing only)
78 */
79 .tolerant = 1,
80 .monarch_timeout = -1
81 };
82
83 /* User mode helper program triggered by machine check event */
84 static unsigned long mce_need_notify;
85 static char mce_helper[128];
86 static char *mce_helper_argv[2] = { mce_helper, NULL };
87
88 static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
89
90 static DEFINE_PER_CPU(struct mce, mces_seen);
91 static int cpu_missing;
92
93 /*
94 * MCA banks polled by the period polling timer for corrected events.
95 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
96 */
97 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
98 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
99 };
100
101 /*
102 * MCA banks controlled through firmware first for corrected errors.
103 * This is a global list of banks for which we won't enable CMCI and we
104 * won't poll. Firmware controls these banks and is responsible for
105 * reporting corrected errors through GHES. Uncorrected/recoverable
106 * errors are still notified through a machine check.
107 */
108 mce_banks_t mce_banks_ce_disabled;
109
110 static DEFINE_PER_CPU(struct work_struct, mce_work);
111
112 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
113
114 /*
115 * CPU/chipset specific EDAC code can register a notifier call here to print
116 * MCE errors in a human-readable form.
117 */
118 static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
119
120 /* Do initial initialization of a struct mce */
121 void mce_setup(struct mce *m)
122 {
123 memset(m, 0, sizeof(struct mce));
124 m->cpu = m->extcpu = smp_processor_id();
125 rdtscll(m->tsc);
126 /* We hope get_seconds stays lockless */
127 m->time = get_seconds();
128 m->cpuvendor = boot_cpu_data.x86_vendor;
129 m->cpuid = cpuid_eax(1);
130 m->socketid = cpu_data(m->extcpu).phys_proc_id;
131 m->apicid = cpu_data(m->extcpu).initial_apicid;
132 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
133 }
134
135 DEFINE_PER_CPU(struct mce, injectm);
136 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
137
138 /*
139 * Lockless MCE logging infrastructure.
140 * This avoids deadlocks on printk locks without having to break locks. It
141 * also separates MCEs from kernel messages to avoid bogus bug reports.
142 */
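/*
 * Rough sketch of the protocol implemented below (for orientation only):
 * a writer reserves a slot by advancing mcelog.next with cmpxchg(), copies
 * its record in and only then sets ->finished; readers wait for ->finished
 * before trusting a slot and reset mcelog.next to 0 once they have drained
 * the buffer.
 */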
143
144 static struct mce_log mcelog = {
145 .signature = MCE_LOG_SIGNATURE,
146 .len = MCE_LOG_LEN,
147 .recordlen = sizeof(struct mce),
148 };
149
150 void mce_log(struct mce *mce)
151 {
152 unsigned next, entry;
153
154 /* Emit the trace record: */
155 trace_mce_record(mce);
156
157 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
158
159 mce->finished = 0;
160 wmb();
161 for (;;) {
162 entry = rcu_dereference_check_mce(mcelog.next);
163 for (;;) {
164
165 /*
166 * When the buffer fills up, discard new entries.
167 * Assume that the earlier errors are the more
168 * interesting ones:
169 */
170 if (entry >= MCE_LOG_LEN) {
171 set_bit(MCE_OVERFLOW,
172 (unsigned long *)&mcelog.flags);
173 return;
174 }
175 /* Old left over entry. Skip: */
176 if (mcelog.entry[entry].finished) {
177 entry++;
178 continue;
179 }
180 break;
181 }
182 smp_rmb();
183 next = entry + 1;
184 if (cmpxchg(&mcelog.next, entry, next) == entry)
185 break;
186 }
187 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
188 wmb();
189 mcelog.entry[entry].finished = 1;
190 wmb();
191
192 mce->finished = 1;
193 set_bit(0, &mce_need_notify);
194 }
195
196 static void drain_mcelog_buffer(void)
197 {
198 unsigned int next, i, prev = 0;
199
200 next = ACCESS_ONCE(mcelog.next);
201
202 do {
203 struct mce *m;
204
205 /* drain what was logged during boot */
206 for (i = prev; i < next; i++) {
207 unsigned long start = jiffies;
208 unsigned retries = 1;
209
210 m = &mcelog.entry[i];
211
212 while (!m->finished) {
213 if (time_after_eq(jiffies, start + 2*retries))
214 retries++;
215
216 cpu_relax();
217
218 if (!m->finished && retries >= 4) {
219 pr_err("skipping error being logged currently!\n");
220 break;
221 }
222 }
223 smp_rmb();
224 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
225 }
226
227 memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
228 prev = next;
229 next = cmpxchg(&mcelog.next, prev, 0);
230 } while (next != prev);
231 }
232
233
234 void mce_register_decode_chain(struct notifier_block *nb)
235 {
236 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
237 drain_mcelog_buffer();
238 }
239 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
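/*
 * Illustrative only (hypothetical names, not from this file): a decoder
 * such as an EDAC driver would typically hook in like
 *
 *	static int my_decode(struct notifier_block *nb, unsigned long val,
 *			     void *data)
 *	{
 *		struct mce *m = data;
 *		...
 *		return NOTIFY_STOP;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_decode };
 *	mce_register_decode_chain(&my_nb);
 *
 * Returning NOTIFY_STOP tells print_mce() that the record was decoded.
 */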
240
241 void mce_unregister_decode_chain(struct notifier_block *nb)
242 {
243 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
244 }
245 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
246
247 static void print_mce(struct mce *m)
248 {
249 int ret = 0;
250
251 pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
252 m->extcpu, m->mcgstatus, m->bank, m->status);
253
254 if (m->ip) {
255 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
256 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
257 m->cs, m->ip);
258
259 if (m->cs == __KERNEL_CS)
260 print_symbol("{%s}", m->ip);
261 pr_cont("\n");
262 }
263
264 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
265 if (m->addr)
266 pr_cont("ADDR %llx ", m->addr);
267 if (m->misc)
268 pr_cont("MISC %llx ", m->misc);
269
270 pr_cont("\n");
271 /*
272 * Note this output is parsed by external tools and old fields
273 * should not be changed.
274 */
275 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
276 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
277 cpu_data(m->extcpu).microcode);
278
279 /*
280 * Print out human-readable details about the MCE error,
281 * (if the CPU has an implementation for that)
282 */
283 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
284 if (ret == NOTIFY_STOP)
285 return;
286
287 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
288 }
289
290 #define PANIC_TIMEOUT 5 /* 5 seconds */
291
292 static atomic_t mce_panicked;
293
294 static int fake_panic;
295 static atomic_t mce_fake_panicked;
296
297 /* Panic in progress. Enable interrupts and wait for final IPI */
298 static void wait_for_panic(void)
299 {
300 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
301
302 preempt_disable();
303 local_irq_enable();
304 while (timeout-- > 0)
305 udelay(1);
306 if (panic_timeout == 0)
307 panic_timeout = mca_cfg.panic_timeout;
308 panic("Panicing machine check CPU died");
309 }
310
311 static void mce_panic(const char *msg, struct mce *final, char *exp)
312 {
313 int i, apei_err = 0;
314
315 if (!fake_panic) {
316 /*
317 * Make sure only one CPU runs in machine check panic
318 */
319 if (atomic_inc_return(&mce_panicked) > 1)
320 wait_for_panic();
321 barrier();
322
323 bust_spinlocks(1);
324 console_verbose();
325 } else {
326 /* Don't log too much for fake panic */
327 if (atomic_inc_return(&mce_fake_panicked) > 1)
328 return;
329 }
330 /* First print corrected ones that are still unlogged */
331 for (i = 0; i < MCE_LOG_LEN; i++) {
332 struct mce *m = &mcelog.entry[i];
333 if (!(m->status & MCI_STATUS_VAL))
334 continue;
335 if (!(m->status & MCI_STATUS_UC)) {
336 print_mce(m);
337 if (!apei_err)
338 apei_err = apei_write_mce(m);
339 }
340 }
341 /* Now print uncorrected but with the final one last */
342 for (i = 0; i < MCE_LOG_LEN; i++) {
343 struct mce *m = &mcelog.entry[i];
344 if (!(m->status & MCI_STATUS_VAL))
345 continue;
346 if (!(m->status & MCI_STATUS_UC))
347 continue;
348 if (!final || memcmp(m, final, sizeof(struct mce))) {
349 print_mce(m);
350 if (!apei_err)
351 apei_err = apei_write_mce(m);
352 }
353 }
354 if (final) {
355 print_mce(final);
356 if (!apei_err)
357 apei_err = apei_write_mce(final);
358 }
359 if (cpu_missing)
360 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
361 if (exp)
362 pr_emerg(HW_ERR "Machine check: %s\n", exp);
363 if (!fake_panic) {
364 if (panic_timeout == 0)
365 panic_timeout = mca_cfg.panic_timeout;
366 panic(msg);
367 } else
368 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
369 }
370
371 /* Support code for software error injection */
372
373 static int msr_to_offset(u32 msr)
374 {
375 unsigned bank = __this_cpu_read(injectm.bank);
376
377 if (msr == mca_cfg.rip_msr)
378 return offsetof(struct mce, ip);
379 if (msr == MSR_IA32_MCx_STATUS(bank))
380 return offsetof(struct mce, status);
381 if (msr == MSR_IA32_MCx_ADDR(bank))
382 return offsetof(struct mce, addr);
383 if (msr == MSR_IA32_MCx_MISC(bank))
384 return offsetof(struct mce, misc);
385 if (msr == MSR_IA32_MCG_STATUS)
386 return offsetof(struct mce, mcgstatus);
387 return -1;
388 }
389
390 /* MSR access wrappers used for error injection */
391 static u64 mce_rdmsrl(u32 msr)
392 {
393 u64 v;
394
395 if (__this_cpu_read(injectm.finished)) {
396 int offset = msr_to_offset(msr);
397
398 if (offset < 0)
399 return 0;
400 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
401 }
402
403 if (rdmsrl_safe(msr, &v)) {
404 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
405 /*
406 * Return zero in case the access faulted. This should
407 * not happen normally but can happen if the CPU does
408 * something weird, or if the code is buggy.
409 */
410 v = 0;
411 }
412
413 return v;
414 }
415
416 static void mce_wrmsrl(u32 msr, u64 v)
417 {
418 if (__this_cpu_read(injectm.finished)) {
419 int offset = msr_to_offset(msr);
420
421 if (offset >= 0)
422 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
423 return;
424 }
425 wrmsrl(msr, v);
426 }
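/*
 * Note: in the injection case (the error-injection code fills the per-CPU
 * injectm record and sets ->finished), the wrappers above redirect the MSR
 * access to the matching field of injectm, so e.g. a read of
 * MSR_IA32_MCx_STATUS(bank) returns injectm.status instead of touching
 * real hardware.
 */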
427
428 /*
429 * Collect all global (w.r.t. this processor) status about this machine
430 * check into our "mce" struct so that we can use it later to assess
431 * the severity of the problem as we read per-bank specific details.
432 */
433 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
434 {
435 mce_setup(m);
436
437 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
438 if (regs) {
439 /*
440 * Get the address of the instruction at the time of
441 * the machine check error.
442 */
443 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
444 m->ip = regs->ip;
445 m->cs = regs->cs;
446
447 /*
448 * When in VM86 mode make the cs look like ring 3
449 * always. This is a lie, but it's better than passing
450 * the additional vm86 bit around everywhere.
451 */
452 if (v8086_mode(regs))
453 m->cs |= 3;
454 }
455 /* Use accurate RIP reporting if available. */
456 if (mca_cfg.rip_msr)
457 m->ip = mce_rdmsrl(mca_cfg.rip_msr);
458 }
459 }
460
461 /*
462 * Simple lockless ring to communicate PFNs from the exception handler to the
463 * process context work function. This is vastly simplified because there's
464 * only a single reader and a single writer.
465 */
466 #define MCE_RING_SIZE 16 /* we use one entry less */
467
468 struct mce_ring {
469 unsigned short start;
470 unsigned short end;
471 unsigned long ring[MCE_RING_SIZE];
472 };
473 static DEFINE_PER_CPU(struct mce_ring, mce_ring);
474
475 /* Runs with CPU affinity in workqueue */
476 static int mce_ring_empty(void)
477 {
478 struct mce_ring *r = this_cpu_ptr(&mce_ring);
479
480 return r->start == r->end;
481 }
482
483 static int mce_ring_get(unsigned long *pfn)
484 {
485 struct mce_ring *r;
486 int ret = 0;
487
488 *pfn = 0;
489 get_cpu();
490 r = this_cpu_ptr(&mce_ring);
491 if (r->start == r->end)
492 goto out;
493 *pfn = r->ring[r->start];
494 r->start = (r->start + 1) % MCE_RING_SIZE;
495 ret = 1;
496 out:
497 put_cpu();
498 return ret;
499 }
500
501 /* Always runs in MCE context with preempt off */
502 static int mce_ring_add(unsigned long pfn)
503 {
504 struct mce_ring *r = this_cpu_ptr(&mce_ring);
505 unsigned next;
506
507 next = (r->end + 1) % MCE_RING_SIZE;
508 if (next == r->start)
509 return -1;
510 r->ring[r->end] = pfn;
511 wmb();
512 r->end = next;
513 return 0;
514 }
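/*
 * Capacity note: with MCE_RING_SIZE 16 at most 15 PFNs can be queued,
 * because mce_ring_add() refuses to advance ->end onto ->start; keeping
 * one slot free is what lets mce_ring_empty() tell "empty" (start == end)
 * apart from "full" without a separate counter.
 */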
515
516 int mce_available(struct cpuinfo_x86 *c)
517 {
518 if (mca_cfg.disabled)
519 return 0;
520 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
521 }
522
523 static void mce_schedule_work(void)
524 {
525 if (!mce_ring_empty())
526 schedule_work(this_cpu_ptr(&mce_work));
527 }
528
529 static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
530
531 static void mce_irq_work_cb(struct irq_work *entry)
532 {
533 mce_notify_irq();
534 mce_schedule_work();
535 }
536
537 static void mce_report_event(struct pt_regs *regs)
538 {
539 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
540 mce_notify_irq();
541 /*
542 * Triggering the work queue here is just an insurance
543 * policy in case the syscall exit notify handler
544 * doesn't run soon enough or ends up running on the
545 * wrong CPU (can happen when audit sleeps)
546 */
547 mce_schedule_work();
548 return;
549 }
550
551 irq_work_queue(this_cpu_ptr(&mce_irq_work));
552 }
553
554 /*
555 * Read ADDR and MISC registers.
556 */
557 static void mce_read_aux(struct mce *m, int i)
558 {
559 if (m->status & MCI_STATUS_MISCV)
560 m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
561 if (m->status & MCI_STATUS_ADDRV) {
562 m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
563
564 /*
565 * Mask the reported address by the reported granularity.
566 */
567 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
568 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
569 m->addr >>= shift;
570 m->addr <<= shift;
571 }
572 }
573 }
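/*
 * Worked example (illustrative numbers): if MCi_MISC reports an address
 * LSB of 6 (i.e. cacheline granularity), an MCi_ADDR value of 0x12345678
 * is masked down to 0x12345640 by the shift pair above, so consumers only
 * ever see the address rounded to the reported granularity.
 */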
574
575 static bool memory_error(struct mce *m)
576 {
577 struct cpuinfo_x86 *c = &boot_cpu_data;
578
579 if (c->x86_vendor == X86_VENDOR_AMD) {
580 /*
581 * coming soon
582 */
583 return false;
584 } else if (c->x86_vendor == X86_VENDOR_INTEL) {
585 /*
586 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
587 *
588 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
589 * indicating a memory error. Bit 8 is used for indicating a
590 * cache hierarchy error. The combination of bit 2 and bit 3
591 * is used for indicating a `generic' cache hierarchy error.
592 * But we can't just blindly check the above bits, because if
593 * bit 11 is set, then it is a bus/interconnect error - and
594 * either way the above bits just give more detail on what
595 * bus/interconnect error happened. Note that bit 12 can be
596 * ignored, as it's the "filter" bit.
597 */
598 return (m->status & 0xef80) == BIT(7) ||
599 (m->status & 0xef00) == BIT(8) ||
600 (m->status & 0xeffc) == 0xc;
601 }
602
603 return false;
604 }
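/*
 * Worked example (values picked for illustration): an Intel status word
 * with MCACOD = 0x009f (memory read error, channel unspecified) satisfies
 * (status & 0xef80) == BIT(7) and is treated as a memory error, while
 * MCACOD = 0x0e0b (bus/interconnect error, bit 11 set) matches none of
 * the three patterns and is not.
 */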
605
606 DEFINE_PER_CPU(unsigned, mce_poll_count);
607
608 /*
609 * Poll for corrected events or events that happened before reset.
610 * Those are just logged through /dev/mcelog.
611 *
612 * This is executed in standard interrupt context.
613 *
614 * Note: the spec recommends panicking for fatal unsignalled
615 * errors here. However this would be quite problematic --
616 * we would need to reimplement the Monarch handling and
617 * it would mess up the exclusion between exception handler
618 * and poll handler -- so we skip this for now.
619 * These cases should not happen anyway, or only when the CPU
620 * is already totally confused. In this case it's likely it will
621 * not fully execute the machine check handler either.
622 */
623 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
624 {
625 bool error_logged = false;
626 struct mce m;
627 int severity;
628 int i;
629
630 this_cpu_inc(mce_poll_count);
631
632 mce_gather_info(&m, NULL);
633
634 for (i = 0; i < mca_cfg.banks; i++) {
635 if (!mce_banks[i].ctl || !test_bit(i, *b))
636 continue;
637
638 m.misc = 0;
639 m.addr = 0;
640 m.bank = i;
641 m.tsc = 0;
642
643 barrier();
644 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
645 if (!(m.status & MCI_STATUS_VAL))
646 continue;
647
648
649 /*
650 * Uncorrected or signalled events are handled by the exception
651 * handler when it is enabled, so don't process those here.
652 *
653 * TBD do the same check for MCI_STATUS_EN here?
654 */
655 if (!(flags & MCP_UC) &&
656 (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
657 continue;
658
659 mce_read_aux(&m, i);
660
661 if (!(flags & MCP_TIMESTAMP))
662 m.tsc = 0;
663
664 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
665
666 /*
667 * In the cases where we don't have a valid address after all,
668 * do not add it into the ring buffer.
669 */
670 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
671 if (m.status & MCI_STATUS_ADDRV) {
672 mce_ring_add(m.addr >> PAGE_SHIFT);
673 mce_schedule_work();
674 }
675 }
676
677 /*
678 * Don't get the IP here because it's unlikely to
679 * have anything to do with the actual error location.
680 */
681 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) {
682 error_logged = true;
683 mce_log(&m);
684 }
685
686 /*
687 * Clear state for this bank.
688 */
689 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
690 }
691
692 /*
693 * Don't clear MCG_STATUS here because it's only defined for
694 * exceptions.
695 */
696
697 sync_core();
698
699 return error_logged;
700 }
701 EXPORT_SYMBOL_GPL(machine_check_poll);
702
703 /*
704 * Do a quick check if any of the events requires a panic.
705 * This decides if we keep the events around or clear them.
706 */
707 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
708 struct pt_regs *regs)
709 {
710 int i, ret = 0;
711
712 for (i = 0; i < mca_cfg.banks; i++) {
713 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
714 if (m->status & MCI_STATUS_VAL) {
715 __set_bit(i, validp);
716 if (quirk_no_way_out)
717 quirk_no_way_out(i, m, regs);
718 }
719 if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
720 MCE_PANIC_SEVERITY)
721 ret = 1;
722 }
723 return ret;
724 }
725
726 /*
727 * Variable to establish order between CPUs while scanning.
728 * Each CPU spins initially until executing is equal its number.
729 */
730 static atomic_t mce_executing;
731
732 /*
733 * Defines order of CPUs on entry. First CPU becomes Monarch.
734 */
735 static atomic_t mce_callin;
736
737 /*
738 * Check if a timeout waiting for other CPUs happened.
739 */
740 static int mce_timed_out(u64 *t, const char *msg)
741 {
742 /*
743 * The others already did panic for some reason.
744 * Bail out like in a timeout.
745 * rmb() to tell the compiler that system_state
746 * might have been modified by someone else.
747 */
748 rmb();
749 if (atomic_read(&mce_panicked))
750 wait_for_panic();
751 if (!mca_cfg.monarch_timeout)
752 goto out;
753 if ((s64)*t < SPINUNIT) {
754 if (mca_cfg.tolerant <= 1)
755 mce_panic(msg, NULL, NULL);
756 cpu_missing = 1;
757 return 1;
758 }
759 *t -= SPINUNIT;
760 out:
761 touch_nmi_watchdog();
762 return 0;
763 }
764
765 /*
766 * The Monarch's reign. The Monarch is the CPU who entered
767 * the machine check handler first. It waits for the others to
768 * raise the exception too and then grades them. If any
769 * error is fatal, panic. Only then let the others continue.
770 *
771 * The other CPUs entering the MCE handler will be controlled by the
772 * Monarch. They are called Subjects.
773 *
774 * This way we prevent any potential data corruption in an unrecoverable case
775 * and also make sure all CPUs' errors are always examined.
776 *
777 * This also detects the case of a machine check event coming from outer
778 * space (not detected by any CPU). In this case some external agent wants
779 * us to shut down, so panic too.
780 *
781 * The other CPUs might still decide to panic if the handler happens
782 * in an unrecoverable place, but in this case the system is in a semi-stable
783 * state and won't corrupt anything by itself. It's ok to let the others
784 * continue for a bit first.
785 *
786 * All the spin loops have timeouts; when a timeout happens a CPU
787 * typically elects itself to be Monarch.
788 */
789 static void mce_reign(void)
790 {
791 int cpu;
792 struct mce *m = NULL;
793 int global_worst = 0;
794 char *msg = NULL;
795 char *nmsg = NULL;
796
797 /*
798 * This CPU is the Monarch and the other CPUs have run
799 * through their handlers.
800 * Grade the severity of the errors of all the CPUs.
801 */
802 for_each_possible_cpu(cpu) {
803 int severity = mce_severity(&per_cpu(mces_seen, cpu),
804 mca_cfg.tolerant,
805 &nmsg, true);
806 if (severity > global_worst) {
807 msg = nmsg;
808 global_worst = severity;
809 m = &per_cpu(mces_seen, cpu);
810 }
811 }
812
813 /*
814 * Cannot recover? Panic here then.
815 * This dumps all the mces in the log buffer and stops the
816 * other CPUs.
817 */
818 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
819 mce_panic("Fatal machine check", m, msg);
820
821 /*
822 * For UC somewhere we let the CPU who detects it handle it.
823 * Also must let continue the others, otherwise the handling
824 * CPU could deadlock on a lock.
825 */
826
827 /*
828 * No machine check event found. Must be some external
829 * source or one CPU is hung. Panic.
830 */
831 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
832 mce_panic("Fatal machine check from unknown source", NULL, NULL);
833
834 /*
835 * Now clear all the mces_seen so that they don't reappear on
836 * the next mce.
837 */
838 for_each_possible_cpu(cpu)
839 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
840 }
841
842 static atomic_t global_nwo;
843
844 /*
845 * Start of Monarch synchronization. This waits until all CPUs have
846 * entered the exception handler and then determines if any of them
847 * saw a fatal event that requires panic. Then it executes them
848 * in the entry order.
849 * TBD double check parallel CPU hotunplug
850 */
851 static int mce_start(int *no_way_out)
852 {
853 int order;
854 int cpus = num_online_cpus();
855 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
856
857 if (!timeout)
858 return -1;
859
860 atomic_add(*no_way_out, &global_nwo);
861 /*
862 * global_nwo should be updated before mce_callin
863 */
864 smp_wmb();
865 order = atomic_inc_return(&mce_callin);
866
867 /*
868 * Wait for everyone.
869 */
870 while (atomic_read(&mce_callin) != cpus) {
871 if (mce_timed_out(&timeout,
872 "Timeout: Not all CPUs entered broadcast exception handler")) {
873 atomic_set(&global_nwo, 0);
874 return -1;
875 }
876 ndelay(SPINUNIT);
877 }
878
879 /*
880 * mce_callin should be read before global_nwo
881 */
882 smp_rmb();
883
884 if (order == 1) {
885 /*
886 * Monarch: Starts executing now, the others wait.
887 */
888 atomic_set(&mce_executing, 1);
889 } else {
890 /*
891 * Subject: Now start the scanning loop one by one in
892 * the original callin order.
893 * This way when there are any shared banks it will be
894 * only seen by one CPU before cleared, avoiding duplicates.
895 */
896 while (atomic_read(&mce_executing) < order) {
897 if (mce_timed_out(&timeout,
898 "Timeout: Subject CPUs unable to finish machine check processing")) {
899 atomic_set(&global_nwo, 0);
900 return -1;
901 }
902 ndelay(SPINUNIT);
903 }
904 }
905
906 /*
907 * Cache the global no_way_out state.
908 */
909 *no_way_out = atomic_read(&global_nwo);
910
911 return order;
912 }
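/*
 * Example with three CPUs (illustrative): each CPU's atomic_inc_return()
 * on mce_callin hands out order 1, 2, 3.  Order 1 becomes the Monarch,
 * sets mce_executing to 1 and scans its banks first; its mce_end() then
 * increments mce_executing, releasing order 2, whose mce_end() in turn
 * releases order 3.  The per-bank scan therefore runs strictly in callin
 * order, so a shared bank is seen and cleared by only one CPU.
 */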
913
914 /*
915 * Synchronize between CPUs after main scanning loop.
916 * This invokes the bulk of the Monarch processing.
917 */
918 static int mce_end(int order)
919 {
920 int ret = -1;
921 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
922
923 if (!timeout)
924 goto reset;
925 if (order < 0)
926 goto reset;
927
928 /*
929 * Allow others to run.
930 */
931 atomic_inc(&mce_executing);
932
933 if (order == 1) {
934 /* CHECKME: Can this race with a parallel hotplug? */
935 int cpus = num_online_cpus();
936
937 /*
938 * Monarch: Wait for everyone to go through their scanning
939 * loops.
940 */
941 while (atomic_read(&mce_executing) <= cpus) {
942 if (mce_timed_out(&timeout,
943 "Timeout: Monarch CPU unable to finish machine check processing"))
944 goto reset;
945 ndelay(SPINUNIT);
946 }
947
948 mce_reign();
949 barrier();
950 ret = 0;
951 } else {
952 /*
953 * Subject: Wait for Monarch to finish.
954 */
955 while (atomic_read(&mce_executing) != 0) {
956 if (mce_timed_out(&timeout,
957 "Timeout: Monarch CPU did not finish machine check processing"))
958 goto reset;
959 ndelay(SPINUNIT);
960 }
961
962 /*
963 * Don't reset anything. That's done by the Monarch.
964 */
965 return 0;
966 }
967
968 /*
969 * Reset all global state.
970 */
971 reset:
972 atomic_set(&global_nwo, 0);
973 atomic_set(&mce_callin, 0);
974 barrier();
975
976 /*
977 * Let others run again.
978 */
979 atomic_set(&mce_executing, 0);
980 return ret;
981 }
982
983 /*
984 * Check if the address reported by the CPU is in a format we can parse.
985 * It would be possible to add code for most other cases, but all would
986 * be somewhat complicated (e.g. segment offset would require an instruction
987 * parser). So only support physical addresses up to page granularity for now.
988 */
989 static int mce_usable_address(struct mce *m)
990 {
991 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
992 return 0;
993 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
994 return 0;
995 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
996 return 0;
997 return 1;
998 }
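/*
 * For example (illustrative): a record with MISCV and ADDRV set whose
 * MCi_MISC encodes ADDR_LSB <= 12 and ADDR_MODE == physical is usable and
 * its PFN can be fed to memory_failure(); an address reported in
 * segment-offset mode is rejected even though ADDRV is set.
 */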
999
1000 static void mce_clear_state(unsigned long *toclear)
1001 {
1002 int i;
1003
1004 for (i = 0; i < mca_cfg.banks; i++) {
1005 if (test_bit(i, toclear))
1006 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1007 }
1008 }
1009
1010 /*
1011 * The actual machine check handler. This only handles real
1012 * exceptions when something got corrupted coming in through int 18.
1013 *
1014 * This is executed in NMI context not subject to normal locking rules. This
1015 * implies that most kernel services cannot be safely used. Don't even
1016 * think about putting a printk in there!
1017 *
1018 * On Intel systems this is entered on all CPUs in parallel through
1019 * MCE broadcast. However some CPUs might be broken beyond repair,
1020 * so be always careful when synchronizing with others.
1021 */
1022 void do_machine_check(struct pt_regs *regs, long error_code)
1023 {
1024 struct mca_config *cfg = &mca_cfg;
1025 struct mce m, *final;
1026 enum ctx_state prev_state;
1027 int i;
1028 int worst = 0;
1029 int severity;
1030 /*
1031 * Establish sequential order between the CPUs entering the machine
1032 * check handler.
1033 */
1034 int order;
1035 /*
1036 * If no_way_out gets set, there is no safe way to recover from this
1037 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1038 */
1039 int no_way_out = 0;
1040 /*
1041 * If kill_it gets set, there is no safe way to return to the interrupted
1042 * context and the current process may need to be killed with SIGBUS.
1043 */
1044 int kill_it = 0;
1045 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1046 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1047 char *msg = "Unknown";
1048 u64 recover_paddr = ~0ull;
1049 int flags = MF_ACTION_REQUIRED;
1050 int lmce = 0;
1051
1052 prev_state = ist_enter(regs);
1053
1054 this_cpu_inc(mce_exception_count);
1055
1056 if (!cfg->banks)
1057 goto out;
1058
1059 mce_gather_info(&m, regs);
1060
1061 final = this_cpu_ptr(&mces_seen);
1062 *final = m;
1063
1064 memset(valid_banks, 0, sizeof(valid_banks));
1065 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1066
1067 barrier();
1068
1069 /*
1070 * If there is no restart IP we might need to kill or panic.
1071 * Assume the worst for now, but if we find the
1072 * severity is MCE_AR_SEVERITY we have other options.
1073 */
1074 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1075 kill_it = 1;
1076
1077 /*
1078 * Check if this MCE is signaled to only this logical processor
1079 */
1080 if (m.mcgstatus & MCG_STATUS_LMCES)
1081 lmce = 1;
1082 else {
1083 /*
1084 * Go through all the banks in exclusion of the other CPUs.
1085 * This way we don't report duplicated events on shared banks
1086 * because the first one to see it will clear it.
1087 * If this is a Local MCE, then no need to perform rendezvous.
1088 */
1089 order = mce_start(&no_way_out);
1090 }
1091
1092 for (i = 0; i < cfg->banks; i++) {
1093 __clear_bit(i, toclear);
1094 if (!test_bit(i, valid_banks))
1095 continue;
1096 if (!mce_banks[i].ctl)
1097 continue;
1098
1099 m.misc = 0;
1100 m.addr = 0;
1101 m.bank = i;
1102
1103 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1104 if ((m.status & MCI_STATUS_VAL) == 0)
1105 continue;
1106
1107 /*
1108 * Non-uncorrected or non-signaled errors are handled by
1109 * machine_check_poll(). Leave them alone, unless this panics.
1110 */
1111 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1112 !no_way_out)
1113 continue;
1114
1115 /*
1116 * Set taint even when machine check was not enabled.
1117 */
1118 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1119
1120 severity = mce_severity(&m, cfg->tolerant, NULL, true);
1121
1122 /*
1123 * When the machine check was for a corrected/deferred error, leave it
1124 * to that handler and don't touch it, unless we're panicking.
1125 */
1126 if ((severity == MCE_KEEP_SEVERITY ||
1127 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1128 continue;
1129 __set_bit(i, toclear);
1130 if (severity == MCE_NO_SEVERITY) {
1131 /*
1132 * Machine check event was not enabled. Clear, but
1133 * ignore.
1134 */
1135 continue;
1136 }
1137
1138 mce_read_aux(&m, i);
1139
1140 /*
1141 * Action optional error. Queue address for later processing.
1142 * When the ring overflows we just ignore the AO error.
1143 * RED-PEN add some logging mechanism when
1144 * mce_usable_address() or mce_ring_add() fails.
1145 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
1146 */
1147 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1148 mce_ring_add(m.addr >> PAGE_SHIFT);
1149
1150 mce_log(&m);
1151
1152 if (severity > worst) {
1153 *final = m;
1154 worst = severity;
1155 }
1156 }
1157
1158 /* mce_clear_state will clear *final, save locally for use later */
1159 m = *final;
1160
1161 if (!no_way_out)
1162 mce_clear_state(toclear);
1163
1164 /*
1165 * Do most of the synchronization with other CPUs.
1166 * When there's any problem use only local no_way_out state.
1167 */
1168 if (!lmce) {
1169 if (mce_end(order) < 0)
1170 no_way_out = worst >= MCE_PANIC_SEVERITY;
1171 } else {
1172 /*
1173 * Local MCE skipped calling mce_reign()
1174 * If we found a fatal error, we need to panic here.
1175 */
1176 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
1177 mce_panic("Machine check from unknown source",
1178 NULL, NULL);
1179 }
1180
1181 /*
1182 * At insane "tolerant" levels we take no action. Otherwise
1183 * we only die if we have no other choice. For less serious
1184 * issues we try to recover, or limit damage to the current
1185 * process.
1186 */
1187 if (cfg->tolerant < 3) {
1188 if (no_way_out)
1189 mce_panic("Fatal machine check on current CPU", &m, msg);
1190 if (worst == MCE_AR_SEVERITY) {
1191 recover_paddr = m.addr;
1192 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1193 flags |= MF_MUST_KILL;
1194 } else if (kill_it) {
1195 force_sig(SIGBUS, current);
1196 }
1197 }
1198
1199 if (worst > 0)
1200 mce_report_event(regs);
1201 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1202 out:
1203 sync_core();
1204
1205 if (recover_paddr == ~0ull)
1206 goto done;
1207
1208 pr_err("Uncorrected hardware memory error in user-access at %llx",
1209 recover_paddr);
1210 /*
1211 * We must call memory_failure() here even if the current process is
1212 * doomed. We still need to mark the page as poisoned and alert any
1213 * other users of the page.
1214 */
1215 ist_begin_non_atomic(regs);
1216 local_irq_enable();
1217 if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
1218 pr_err("Memory error not recovered");
1219 force_sig(SIGBUS, current);
1220 }
1221 local_irq_disable();
1222 ist_end_non_atomic();
1223 done:
1224 ist_exit(regs, prev_state);
1225 }
1226 EXPORT_SYMBOL_GPL(do_machine_check);
1227
1228 #ifndef CONFIG_MEMORY_FAILURE
1229 int memory_failure(unsigned long pfn, int vector, int flags)
1230 {
1231 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1232 BUG_ON(flags & MF_ACTION_REQUIRED);
1233 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1234 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1235 pfn);
1236
1237 return 0;
1238 }
1239 #endif
1240
1241 /*
1242 * Action optional processing happens here (picking up
1243 * from the list of faulting pages that do_machine_check()
1244 * placed into the "ring").
1245 */
1246 static void mce_process_work(struct work_struct *dummy)
1247 {
1248 unsigned long pfn;
1249
1250 while (mce_ring_get(&pfn))
1251 memory_failure(pfn, MCE_VECTOR, 0);
1252 }
1253
1254 #ifdef CONFIG_X86_MCE_INTEL
1255 /***
1256 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1258 * @status: Event status information
1259 *
1260 * This function should be called by the thermal interrupt after the
1261 * event has been processed and the decision was made to log the event
1262 * further.
1263 *
1264 * The status parameter will be saved to the 'status' field of 'struct mce'
1265 * and historically has been the register value of the
1266 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1267 */
1268 void mce_log_therm_throt_event(__u64 status)
1269 {
1270 struct mce m;
1271
1272 mce_setup(&m);
1273 m.bank = MCE_THERMAL_BANK;
1274 m.status = status;
1275 mce_log(&m);
1276 }
1277 #endif /* CONFIG_X86_MCE_INTEL */
1278
1279 /*
1280 * Periodic polling timer for "silent" machine check errors. If the
1281 * poller finds an MCE, poll 2x faster. When the poller finds no more
1282 * errors, poll 2x slower (up to check_interval seconds).
1283 */
1284 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1285
1286 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1287 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1288
1289 static unsigned long mce_adjust_timer_default(unsigned long interval)
1290 {
1291 return interval;
1292 }
1293
1294 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1295
1296 static void __restart_timer(struct timer_list *t, unsigned long interval)
1297 {
1298 unsigned long when = jiffies + interval;
1299 unsigned long flags;
1300
1301 local_irq_save(flags);
1302
1303 if (timer_pending(t)) {
1304 if (time_before(when, t->expires))
1305 mod_timer_pinned(t, when);
1306 } else {
1307 t->expires = round_jiffies(when);
1308 add_timer_on(t, smp_processor_id());
1309 }
1310
1311 local_irq_restore(flags);
1312 }
1313
1314 static void mce_timer_fn(unsigned long data)
1315 {
1316 struct timer_list *t = this_cpu_ptr(&mce_timer);
1317 int cpu = smp_processor_id();
1318 unsigned long iv;
1319
1320 WARN_ON(cpu != data);
1321
1322 iv = __this_cpu_read(mce_next_interval);
1323
1324 if (mce_available(this_cpu_ptr(&cpu_info))) {
1325 machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
1326
1327 if (mce_intel_cmci_poll()) {
1328 iv = mce_adjust_timer(iv);
1329 goto done;
1330 }
1331 }
1332
1333 /*
1334 * Alert userspace if needed. If we logged an MCE, reduce the polling
1335 * interval, otherwise increase the polling interval.
1336 */
1337 if (mce_notify_irq())
1338 iv = max(iv / 2, (unsigned long) HZ/100);
1339 else
1340 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1341
1342 done:
1343 __this_cpu_write(mce_next_interval, iv);
1344 __restart_timer(t, iv);
1345 }
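/*
 * In practice (rough numbers, assuming HZ=1000): after a poll that logged
 * an event the next run fires in iv/2 jiffies but never sooner than
 * HZ/100 = 10ms; after a quiet poll the interval doubles again, capped at
 * check_interval seconds, so an idle machine settles back to slow polling.
 */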
1346
1347 /*
1348 * Ensure that the timer is firing in @interval from now.
1349 */
1350 void mce_timer_kick(unsigned long interval)
1351 {
1352 struct timer_list *t = this_cpu_ptr(&mce_timer);
1353 unsigned long iv = __this_cpu_read(mce_next_interval);
1354
1355 __restart_timer(t, interval);
1356
1357 if (interval < iv)
1358 __this_cpu_write(mce_next_interval, interval);
1359 }
1360
1361 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1362 static void mce_timer_delete_all(void)
1363 {
1364 int cpu;
1365
1366 for_each_online_cpu(cpu)
1367 del_timer_sync(&per_cpu(mce_timer, cpu));
1368 }
1369
1370 static void mce_do_trigger(struct work_struct *work)
1371 {
1372 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
1373 }
1374
1375 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1376
1377 /*
1378 * Notify the user(s) about new machine check events.
1379 * Can be called from interrupt context, but not from machine check/NMI
1380 * context.
1381 */
1382 int mce_notify_irq(void)
1383 {
1384 /* Not more than two messages every minute */
1385 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1386
1387 if (test_and_clear_bit(0, &mce_need_notify)) {
1388 /* wake processes polling /dev/mcelog */
1389 wake_up_interruptible(&mce_chrdev_wait);
1390
1391 if (mce_helper[0])
1392 schedule_work(&mce_trigger_work);
1393
1394 if (__ratelimit(&ratelimit))
1395 pr_info(HW_ERR "Machine check events logged\n");
1396
1397 return 1;
1398 }
1399 return 0;
1400 }
1401 EXPORT_SYMBOL_GPL(mce_notify_irq);
1402
1403 static int __mcheck_cpu_mce_banks_init(void)
1404 {
1405 int i;
1406 u8 num_banks = mca_cfg.banks;
1407
1408 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1409 if (!mce_banks)
1410 return -ENOMEM;
1411
1412 for (i = 0; i < num_banks; i++) {
1413 struct mce_bank *b = &mce_banks[i];
1414
1415 b->ctl = -1ULL;
1416 b->init = 1;
1417 }
1418 return 0;
1419 }
1420
1421 /*
1422 * Initialize Machine Checks for a CPU.
1423 */
1424 static int __mcheck_cpu_cap_init(void)
1425 {
1426 unsigned b;
1427 u64 cap;
1428
1429 rdmsrl(MSR_IA32_MCG_CAP, cap);
1430
1431 b = cap & MCG_BANKCNT_MASK;
1432 if (!mca_cfg.banks)
1433 pr_info("CPU supports %d MCE banks\n", b);
1434
1435 if (b > MAX_NR_BANKS) {
1436 pr_warn("Using only %u machine check banks out of %u\n",
1437 MAX_NR_BANKS, b);
1438 b = MAX_NR_BANKS;
1439 }
1440
1441 /* Don't support asymmetric configurations today */
1442 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1443 mca_cfg.banks = b;
1444
1445 if (!mce_banks) {
1446 int err = __mcheck_cpu_mce_banks_init();
1447
1448 if (err)
1449 return err;
1450 }
1451
1452 /* Use accurate RIP reporting if available. */
1453 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1454 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1455
1456 if (cap & MCG_SER_P)
1457 mca_cfg.ser = true;
1458
1459 return 0;
1460 }
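/*
 * Decoding example (made-up value): cap = 0x01100309 would report 9 banks
 * (bits 7:0), MCG_CTL present (bit 8), extended registers present (bit 9)
 * with a count of 16 (bits 23:16, enough to enable the accurate-RIP MSR),
 * and software error recovery (MCG_SER_P, bit 24), so both mca_cfg.rip_msr
 * and mca_cfg.ser get set above.
 */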
1461
1462 static void __mcheck_cpu_init_generic(void)
1463 {
1464 enum mcp_flags m_fl = 0;
1465 mce_banks_t all_banks;
1466 u64 cap;
1467 int i;
1468
1469 if (!mca_cfg.bootlog)
1470 m_fl = MCP_DONTLOG;
1471
1472 /*
1473 * Log the machine checks left over from the previous reset.
1474 */
1475 bitmap_fill(all_banks, MAX_NR_BANKS);
1476 machine_check_poll(MCP_UC | m_fl, &all_banks);
1477
1478 cr4_set_bits(X86_CR4_MCE);
1479
1480 rdmsrl(MSR_IA32_MCG_CAP, cap);
1481 if (cap & MCG_CTL_P)
1482 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1483
1484 for (i = 0; i < mca_cfg.banks; i++) {
1485 struct mce_bank *b = &mce_banks[i];
1486
1487 if (!b->init)
1488 continue;
1489 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1490 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1491 }
1492 }
1493
1494 /*
1495 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1496 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1497 * Vol 3B Table 15-20). But this confuses both the code that determines
1498 * whether the machine check occurred in kernel or user mode, and also
1499 * the severity assessment code. Pretend that EIPV was set, and take the
1500 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1501 */
1502 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1503 {
1504 if (bank != 0)
1505 return;
1506 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1507 return;
1508 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1509 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1510 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1511 MCACOD)) !=
1512 (MCI_STATUS_UC|MCI_STATUS_EN|
1513 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1514 MCI_STATUS_AR|MCACOD_INSTR))
1515 return;
1516
1517 m->mcgstatus |= MCG_STATUS_EIPV;
1518 m->ip = regs->ip;
1519 m->cs = regs->cs;
1520 }
1521
1522 /* Add per CPU specific workarounds here */
1523 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1524 {
1525 struct mca_config *cfg = &mca_cfg;
1526
1527 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1528 pr_info("unknown CPU type - not enabling MCE support\n");
1529 return -EOPNOTSUPP;
1530 }
1531
1532 /* This should be disabled by the BIOS, but isn't always */
1533 if (c->x86_vendor == X86_VENDOR_AMD) {
1534 if (c->x86 == 15 && cfg->banks > 4) {
1535 /*
1536 * disable GART TBL walk error reporting, which
1537 * trips off incorrectly with the IOMMU & 3ware
1538 * & Cerberus:
1539 */
1540 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1541 }
1542 if (c->x86 <= 17 && cfg->bootlog < 0) {
1543 /*
1544 * Lots of broken BIOSes around that don't clear them
1545 * by default and leave crap in there. Don't log:
1546 */
1547 cfg->bootlog = 0;
1548 }
1549 /*
1550 * Various K7s with broken bank 0 around. Always disable
1551 * by default.
1552 */
1553 if (c->x86 == 6 && cfg->banks > 0)
1554 mce_banks[0].ctl = 0;
1555
1556 /*
1557 * overflow_recov is supported for F15h Models 00h-0fh
1558 * even though we don't have a CPUID bit for it.
1559 */
1560 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1561 mce_flags.overflow_recov = 1;
1562
1563 /*
1564 * Turn off MC4_MISC thresholding banks on those models since
1565 * they're not supported there.
1566 */
1567 if (c->x86 == 0x15 &&
1568 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1569 int i;
1570 u64 hwcr;
1571 bool need_toggle;
1572 u32 msrs[] = {
1573 0x00000413, /* MC4_MISC0 */
1574 0xc0000408, /* MC4_MISC1 */
1575 };
1576
1577 rdmsrl(MSR_K7_HWCR, hwcr);
1578
1579 /* McStatusWrEn has to be set */
1580 need_toggle = !(hwcr & BIT(18));
1581
1582 if (need_toggle)
1583 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1584
1585 /* Clear CntP bit safely */
1586 for (i = 0; i < ARRAY_SIZE(msrs); i++)
1587 msr_clear_bit(msrs[i], 62);
1588
1589 /* restore old settings */
1590 if (need_toggle)
1591 wrmsrl(MSR_K7_HWCR, hwcr);
1592 }
1593 }
1594
1595 if (c->x86_vendor == X86_VENDOR_INTEL) {
1596 /*
1597 * The SDM documents that on family 6, bank 0 should not be written
1598 * because it aliases to another special BIOS controlled
1599 * register.
1600 * But it's not aliased anymore on model 0x1a+.
1601 * Don't ignore bank 0 completely because there could be a
1602 * valid event later, merely don't write CTL0.
1603 */
1604
1605 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1606 mce_banks[0].init = 0;
1607
1608 /*
1609 * All newer Intel systems support MCE broadcasting. Enable
1610 * synchronization with a one second timeout.
1611 */
1612 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1613 cfg->monarch_timeout < 0)
1614 cfg->monarch_timeout = USEC_PER_SEC;
1615
1616 /*
1617 * There are also broken BIOSes on some Pentium M and
1618 * earlier systems:
1619 */
1620 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1621 cfg->bootlog = 0;
1622
1623 if (c->x86 == 6 && c->x86_model == 45)
1624 quirk_no_way_out = quirk_sandybridge_ifu;
1625 }
1626 if (cfg->monarch_timeout < 0)
1627 cfg->monarch_timeout = 0;
1628 if (cfg->bootlog != 0)
1629 cfg->panic_timeout = 30;
1630
1631 return 0;
1632 }
1633
1634 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1635 {
1636 if (c->x86 != 5)
1637 return 0;
1638
1639 switch (c->x86_vendor) {
1640 case X86_VENDOR_INTEL:
1641 intel_p5_mcheck_init(c);
1642 return 1;
1643 break;
1644 case X86_VENDOR_CENTAUR:
1645 winchip_mcheck_init(c);
1646 return 1;
1647 break;
1648 }
1649
1650 return 0;
1651 }
1652
1653 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1654 {
1655 switch (c->x86_vendor) {
1656 case X86_VENDOR_INTEL:
1657 mce_intel_feature_init(c);
1658 mce_adjust_timer = cmci_intel_adjust_timer;
1659 break;
1660
1661 case X86_VENDOR_AMD: {
1662 u32 ebx = cpuid_ebx(0x80000007);
1663
1664 mce_amd_feature_init(c);
1665 mce_flags.overflow_recov = !!(ebx & BIT(0));
1666 mce_flags.succor = !!(ebx & BIT(1));
1667 break;
1668 }
1669
1670 default:
1671 break;
1672 }
1673 }
1674
1675 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1676 {
1677 unsigned long iv = check_interval * HZ;
1678
1679 if (mca_cfg.ignore_ce || !iv)
1680 return;
1681
1682 per_cpu(mce_next_interval, cpu) = iv;
1683
1684 t->expires = round_jiffies(jiffies + iv);
1685 add_timer_on(t, cpu);
1686 }
1687
1688 static void __mcheck_cpu_init_timer(void)
1689 {
1690 struct timer_list *t = this_cpu_ptr(&mce_timer);
1691 unsigned int cpu = smp_processor_id();
1692
1693 setup_timer(t, mce_timer_fn, cpu);
1694 mce_start_timer(cpu, t);
1695 }
1696
1697 /* Handle unconfigured int18 (should never happen) */
1698 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1699 {
1700 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1701 smp_processor_id());
1702 }
1703
1704 /* Call the installed machine check handler for this CPU setup. */
1705 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1706 unexpected_machine_check;
1707
1708 /*
1709 * Called for each booted CPU to set up machine checks.
1710 * Must be called with preempt off:
1711 */
1712 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1713 {
1714 if (mca_cfg.disabled)
1715 return;
1716
1717 if (__mcheck_cpu_ancient_init(c))
1718 return;
1719
1720 if (!mce_available(c))
1721 return;
1722
1723 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1724 mca_cfg.disabled = true;
1725 return;
1726 }
1727
1728 machine_check_vector = do_machine_check;
1729
1730 __mcheck_cpu_init_generic();
1731 __mcheck_cpu_init_vendor(c);
1732 __mcheck_cpu_init_timer();
1733 INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
1734 init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
1735 }
1736
1737 /*
1738 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1739 */
1740
1741 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1742 static int mce_chrdev_open_count; /* #times opened */
1743 static int mce_chrdev_open_exclu; /* already open exclusive? */
1744
1745 static int mce_chrdev_open(struct inode *inode, struct file *file)
1746 {
1747 spin_lock(&mce_chrdev_state_lock);
1748
1749 if (mce_chrdev_open_exclu ||
1750 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1751 spin_unlock(&mce_chrdev_state_lock);
1752
1753 return -EBUSY;
1754 }
1755
1756 if (file->f_flags & O_EXCL)
1757 mce_chrdev_open_exclu = 1;
1758 mce_chrdev_open_count++;
1759
1760 spin_unlock(&mce_chrdev_state_lock);
1761
1762 return nonseekable_open(inode, file);
1763 }
1764
1765 static int mce_chrdev_release(struct inode *inode, struct file *file)
1766 {
1767 spin_lock(&mce_chrdev_state_lock);
1768
1769 mce_chrdev_open_count--;
1770 mce_chrdev_open_exclu = 0;
1771
1772 spin_unlock(&mce_chrdev_state_lock);
1773
1774 return 0;
1775 }
1776
1777 static void collect_tscs(void *data)
1778 {
1779 unsigned long *cpu_tsc = (unsigned long *)data;
1780
1781 rdtscll(cpu_tsc[smp_processor_id()]);
1782 }
1783
1784 static int mce_apei_read_done;
1785
1786 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1787 static int __mce_read_apei(char __user **ubuf, size_t usize)
1788 {
1789 int rc;
1790 u64 record_id;
1791 struct mce m;
1792
1793 if (usize < sizeof(struct mce))
1794 return -EINVAL;
1795
1796 rc = apei_read_mce(&m, &record_id);
1797 /* Error or no more MCE record */
1798 if (rc <= 0) {
1799 mce_apei_read_done = 1;
1800 /*
1801 * When ERST is disabled, mce_chrdev_read() should return
1802 * "no record" instead of "no device."
1803 */
1804 if (rc == -ENODEV)
1805 return 0;
1806 return rc;
1807 }
1808 rc = -EFAULT;
1809 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1810 return rc;
1811 /*
1812 * In fact, we should clear the record only after it has
1813 * been flushed to disk or sent over the network by
1814 * /sbin/mcelog, but we have no interface to support that now,
1815 * so just clear it to avoid duplication.
1816 */
1817 rc = apei_clear_mce(record_id);
1818 if (rc) {
1819 mce_apei_read_done = 1;
1820 return rc;
1821 }
1822 *ubuf += sizeof(struct mce);
1823
1824 return 0;
1825 }
1826
1827 static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1828 size_t usize, loff_t *off)
1829 {
1830 char __user *buf = ubuf;
1831 unsigned long *cpu_tsc;
1832 unsigned prev, next;
1833 int i, err;
1834
1835 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1836 if (!cpu_tsc)
1837 return -ENOMEM;
1838
1839 mutex_lock(&mce_chrdev_read_mutex);
1840
1841 if (!mce_apei_read_done) {
1842 err = __mce_read_apei(&buf, usize);
1843 if (err || buf != ubuf)
1844 goto out;
1845 }
1846
1847 next = rcu_dereference_check_mce(mcelog.next);
1848
1849 /* Only supports full reads right now */
1850 err = -EINVAL;
1851 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1852 goto out;
1853
1854 err = 0;
1855 prev = 0;
1856 do {
1857 for (i = prev; i < next; i++) {
1858 unsigned long start = jiffies;
1859 struct mce *m = &mcelog.entry[i];
1860
1861 while (!m->finished) {
1862 if (time_after_eq(jiffies, start + 2)) {
1863 memset(m, 0, sizeof(*m));
1864 goto timeout;
1865 }
1866 cpu_relax();
1867 }
1868 smp_rmb();
1869 err |= copy_to_user(buf, m, sizeof(*m));
1870 buf += sizeof(*m);
1871 timeout:
1872 ;
1873 }
1874
1875 memset(mcelog.entry + prev, 0,
1876 (next - prev) * sizeof(struct mce));
1877 prev = next;
1878 next = cmpxchg(&mcelog.next, prev, 0);
1879 } while (next != prev);
1880
1881 synchronize_sched();
1882
1883 /*
1884 * Collect entries that were still getting written before the
1885 * synchronize.
1886 */
1887 on_each_cpu(collect_tscs, cpu_tsc, 1);
1888
1889 for (i = next; i < MCE_LOG_LEN; i++) {
1890 struct mce *m = &mcelog.entry[i];
1891
1892 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1893 err |= copy_to_user(buf, m, sizeof(*m));
1894 smp_rmb();
1895 buf += sizeof(*m);
1896 memset(m, 0, sizeof(*m));
1897 }
1898 }
1899
1900 if (err)
1901 err = -EFAULT;
1902
1903 out:
1904 mutex_unlock(&mce_chrdev_read_mutex);
1905 kfree(cpu_tsc);
1906
1907 return err ? err : buf - ubuf;
1908 }
1909
1910 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1911 {
1912 poll_wait(file, &mce_chrdev_wait, wait);
1913 if (rcu_access_index(mcelog.next))
1914 return POLLIN | POLLRDNORM;
1915 if (!mce_apei_read_done && apei_check_mce())
1916 return POLLIN | POLLRDNORM;
1917 return 0;
1918 }
1919
1920 static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1921 unsigned long arg)
1922 {
1923 int __user *p = (int __user *)arg;
1924
1925 if (!capable(CAP_SYS_ADMIN))
1926 return -EPERM;
1927
1928 switch (cmd) {
1929 case MCE_GET_RECORD_LEN:
1930 return put_user(sizeof(struct mce), p);
1931 case MCE_GET_LOG_LEN:
1932 return put_user(MCE_LOG_LEN, p);
1933 case MCE_GETCLEAR_FLAGS: {
1934 unsigned flags;
1935
1936 do {
1937 flags = mcelog.flags;
1938 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
1939
1940 return put_user(flags, p);
1941 }
1942 default:
1943 return -ENOTTY;
1944 }
1945 }
1946
1947 static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1948 size_t usize, loff_t *off);
1949
1950 void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1951 const char __user *ubuf,
1952 size_t usize, loff_t *off))
1953 {
1954 mce_write = fn;
1955 }
1956 EXPORT_SYMBOL_GPL(register_mce_write_callback);
1957
1958 ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1959 size_t usize, loff_t *off)
1960 {
1961 if (mce_write)
1962 return mce_write(filp, ubuf, usize, off);
1963 else
1964 return -EINVAL;
1965 }
1966
1967 static const struct file_operations mce_chrdev_ops = {
1968 .open = mce_chrdev_open,
1969 .release = mce_chrdev_release,
1970 .read = mce_chrdev_read,
1971 .write = mce_chrdev_write,
1972 .poll = mce_chrdev_poll,
1973 .unlocked_ioctl = mce_chrdev_ioctl,
1974 .llseek = no_llseek,
1975 };
1976
1977 static struct miscdevice mce_chrdev_device = {
1978 MISC_MCELOG_MINOR,
1979 "mcelog",
1980 &mce_chrdev_ops,
1981 };
1982
1983 static void __mce_disable_bank(void *arg)
1984 {
1985 int bank = *((int *)arg);
1986 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1987 cmci_disable_bank(bank);
1988 }
1989
1990 void mce_disable_bank(int bank)
1991 {
1992 if (bank >= mca_cfg.banks) {
1993 pr_warn(FW_BUG
1994 "Ignoring request to disable invalid MCA bank %d.\n",
1995 bank);
1996 return;
1997 }
1998 set_bit(bank, mce_banks_ce_disabled);
1999 on_each_cpu(__mce_disable_bank, &bank, 1);
2000 }
2001
2002 /*
2003 * mce=off Disables machine check
2004 * mce=no_cmci Disables CMCI
2005 * mce=no_lmce Disables LMCE
2006 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2007 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2008 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2009 * monarchtimeout is how long to wait for other CPUs on machine
2010 * check, or 0 to not wait
2011 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
2012 * mce=nobootlog Don't log MCEs from before booting.
2013 * mce=bios_cmci_threshold Don't program the CMCI threshold
2014 */
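/*
 * Example command lines (illustrative): "mce=no_cmci" keeps polling but
 * turns CMCI off, while "mce=2,30000000" selects tolerant level 2 with a
 * 30 second monarch timeout (the second value is given in microseconds).
 */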
2015 static int __init mcheck_enable(char *str)
2016 {
2017 struct mca_config *cfg = &mca_cfg;
2018
2019 if (*str == 0) {
2020 enable_p5_mce();
2021 return 1;
2022 }
2023 if (*str == '=')
2024 str++;
2025 if (!strcmp(str, "off"))
2026 cfg->disabled = true;
2027 else if (!strcmp(str, "no_cmci"))
2028 cfg->cmci_disabled = true;
2029 else if (!strcmp(str, "no_lmce"))
2030 cfg->lmce_disabled = true;
2031 else if (!strcmp(str, "dont_log_ce"))
2032 cfg->dont_log_ce = true;
2033 else if (!strcmp(str, "ignore_ce"))
2034 cfg->ignore_ce = true;
2035 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2036 cfg->bootlog = (str[0] == 'b');
2037 else if (!strcmp(str, "bios_cmci_threshold"))
2038 cfg->bios_cmci_threshold = true;
2039 else if (isdigit(str[0])) {
2040 if (get_option(&str, &cfg->tolerant) == 2)
2041 get_option(&str, &(cfg->monarch_timeout));
2042 } else {
2043 pr_info("mce argument %s ignored. Please use /sys\n", str);
2044 return 0;
2045 }
2046 return 1;
2047 }
2048 __setup("mce", mcheck_enable);
2049
2050 int __init mcheck_init(void)
2051 {
2052 mcheck_intel_therm_init();
2053 mcheck_vendor_init_severity();
2054
2055 return 0;
2056 }
2057
2058 /*
2059 * mce_syscore: PM support
2060 */
2061
2062 /*
2063 * Disable machine checks on suspend and shutdown. We can't really handle
2064 * them later.
2065 */
2066 static int mce_disable_error_reporting(void)
2067 {
2068 int i;
2069
2070 for (i = 0; i < mca_cfg.banks; i++) {
2071 struct mce_bank *b = &mce_banks[i];
2072
2073 if (b->init)
2074 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2075 }
2076 return 0;
2077 }
2078
2079 static int mce_syscore_suspend(void)
2080 {
2081 return mce_disable_error_reporting();
2082 }
2083
2084 static void mce_syscore_shutdown(void)
2085 {
2086 mce_disable_error_reporting();
2087 }
2088
2089 /*
2090 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2091 * Only one CPU is active at this time, the others get re-added later using
2092 * CPU hotplug:
2093 */
2094 static void mce_syscore_resume(void)
2095 {
2096 __mcheck_cpu_init_generic();
2097 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2098 }
2099
2100 static struct syscore_ops mce_syscore_ops = {
2101 .suspend = mce_syscore_suspend,
2102 .shutdown = mce_syscore_shutdown,
2103 .resume = mce_syscore_resume,
2104 };
2105
2106 /*
2107 * mce_device: Sysfs support
2108 */
2109
2110 static void mce_cpu_restart(void *data)
2111 {
2112 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2113 return;
2114 __mcheck_cpu_init_generic();
2115 __mcheck_cpu_init_timer();
2116 }
2117
2118 /* Reinit MCEs after user configuration changes */
2119 static void mce_restart(void)
2120 {
2121 mce_timer_delete_all();
2122 on_each_cpu(mce_cpu_restart, NULL, 1);
2123 }
2124
2125 /* Toggle features for corrected errors */
2126 static void mce_disable_cmci(void *data)
2127 {
2128 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2129 return;
2130 cmci_clear();
2131 }
2132
2133 static void mce_enable_ce(void *all)
2134 {
2135 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2136 return;
2137 cmci_reenable();
2138 cmci_recheck();
2139 if (all)
2140 __mcheck_cpu_init_timer();
2141 }
2142
2143 static struct bus_type mce_subsys = {
2144 .name = "machinecheck",
2145 .dev_name = "machinecheck",
2146 };
2147
2148 DEFINE_PER_CPU(struct device *, mce_device);
2149
2150 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2151
2152 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2153 {
2154 return container_of(attr, struct mce_bank, attr);
2155 }
2156
2157 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2158 char *buf)
2159 {
2160 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2161 }
2162
2163 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2164 const char *buf, size_t size)
2165 {
2166 u64 new;
2167
2168 if (kstrtou64(buf, 0, &new) < 0)
2169 return -EINVAL;
2170
2171 attr_to_bank(attr)->ctl = new;
2172 mce_restart();
2173
2174 return size;
2175 }
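/*
 * Example usage (hypothetical paths, assuming the default sysfs layout of
 * the "machinecheck" subsystem registered below): the per-bank files set up
 * in mce_init_banks() route reads and writes to show_bank()/set_bank(), e.g.
 *
 *   cat /sys/devices/system/machinecheck/machinecheck0/bank0
 *   echo 0xffffffffffffffff > /sys/devices/system/machinecheck/machinecheck0/bank0
 *
 * A write updates the bank's ctl value and calls mce_restart() to reprogram
 * all CPUs.
 */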
2176
2177 static ssize_t
2178 show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2179 {
2180 strcpy(buf, mce_helper);
2181 strcat(buf, "\n");
2182 return strlen(mce_helper) + 1;
2183 }
2184
2185 static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2186 const char *buf, size_t siz)
2187 {
2188 char *p;
2189
2190 strncpy(mce_helper, buf, sizeof(mce_helper));
2191 mce_helper[sizeof(mce_helper)-1] = 0;
2192 p = strchr(mce_helper, '\n');
2193
2194 if (p)
2195 *p = 0;
2196
2197 return strlen(mce_helper) + !!p;
2198 }
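/*
 * Example usage (hypothetical path): the "trigger" attribute stores the
 * user mode helper program (mce_helper) used to notify user space of new
 * events, e.g.
 *
 *   echo /usr/sbin/mcelog > /sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * set_trigger() strips a trailing newline before storing the path.
 */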
2199
2200 static ssize_t set_ignore_ce(struct device *s,
2201 struct device_attribute *attr,
2202 const char *buf, size_t size)
2203 {
2204 u64 new;
2205
2206 if (kstrtou64(buf, 0, &new) < 0)
2207 return -EINVAL;
2208
2209 if (mca_cfg.ignore_ce ^ !!new) {
2210 if (new) {
2211 /* disable ce features */
2212 mce_timer_delete_all();
2213 on_each_cpu(mce_disable_cmci, NULL, 1);
2214 mca_cfg.ignore_ce = true;
2215 } else {
2216 /* enable ce features */
2217 mca_cfg.ignore_ce = false;
2218 on_each_cpu(mce_enable_ce, (void *)1, 1);
2219 }
2220 }
2221 return size;
2222 }
2223
2224 static ssize_t set_cmci_disabled(struct device *s,
2225 struct device_attribute *attr,
2226 const char *buf, size_t size)
2227 {
2228 u64 new;
2229
2230 if (kstrtou64(buf, 0, &new) < 0)
2231 return -EINVAL;
2232
2233 if (mca_cfg.cmci_disabled ^ !!new) {
2234 if (new) {
2235 /* disable cmci */
2236 on_each_cpu(mce_disable_cmci, NULL, 1);
2237 mca_cfg.cmci_disabled = true;
2238 } else {
2239 /* enable cmci */
2240 mca_cfg.cmci_disabled = false;
2241 on_each_cpu(mce_enable_ce, NULL, 1);
2242 }
2243 }
2244 return size;
2245 }
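/*
 * Example usage (hypothetical paths): ignore_ce and cmci_disabled are
 * boolean toggles; a write only takes effect when the value actually
 * changes, as checked by the XOR tests above, e.g.
 *
 *   echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *   echo 0 > /sys/devices/system/machinecheck/machinecheck0/cmci_disabled
 *
 * Enabling ignore_ce stops the polling timers and disables CMCI on all
 * CPUs; clearing it re-enables CMCI and restarts the polling timers.
 */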
2246
2247 static ssize_t store_int_with_restart(struct device *s,
2248 struct device_attribute *attr,
2249 const char *buf, size_t size)
2250 {
2251 ssize_t ret = device_store_int(s, attr, buf, size);
2252 mce_restart();
2253 return ret;
2254 }
2255
2256 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2257 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2258 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2259 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2260
2261 static struct dev_ext_attribute dev_attr_check_interval = {
2262 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2263 &check_interval
2264 };
2265
2266 static struct dev_ext_attribute dev_attr_ignore_ce = {
2267 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2268 &mca_cfg.ignore_ce
2269 };
2270
2271 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2272 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2273 &mca_cfg.cmci_disabled
2274 };
2275
2276 static struct device_attribute *mce_device_attrs[] = {
2277 &dev_attr_tolerant.attr,
2278 &dev_attr_check_interval.attr,
2279 &dev_attr_trigger,
2280 &dev_attr_monarch_timeout.attr,
2281 &dev_attr_dont_log_ce.attr,
2282 &dev_attr_ignore_ce.attr,
2283 &dev_attr_cmci_disabled.attr,
2284 NULL
2285 };
2286
2287 static cpumask_var_t mce_device_initialized;
2288
2289 static void mce_device_release(struct device *dev)
2290 {
2291 kfree(dev);
2292 }
2293
2294 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
2295 static int mce_device_create(unsigned int cpu)
2296 {
2297 struct device *dev;
2298 int err;
2299 int i, j;
2300
2301 if (!mce_available(&boot_cpu_data))
2302 return -EIO;
2303
2304 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2305 if (!dev)
2306 return -ENOMEM;
2307 dev->id = cpu;
2308 dev->bus = &mce_subsys;
2309 dev->release = &mce_device_release;
2310
2311 err = device_register(dev);
2312 if (err) {
2313 put_device(dev);
2314 return err;
2315 }
2316
2317 for (i = 0; mce_device_attrs[i]; i++) {
2318 err = device_create_file(dev, mce_device_attrs[i]);
2319 if (err)
2320 goto error;
2321 }
2322 for (j = 0; j < mca_cfg.banks; j++) {
2323 err = device_create_file(dev, &mce_banks[j].attr);
2324 if (err)
2325 goto error2;
2326 }
2327 cpumask_set_cpu(cpu, mce_device_initialized);
2328 per_cpu(mce_device, cpu) = dev;
2329
2330 return 0;
2331 error2:
2332 while (--j >= 0)
2333 device_remove_file(dev, &mce_banks[j].attr);
2334 error:
2335 while (--i >= 0)
2336 device_remove_file(dev, mce_device_attrs[i]);
2337
2338 device_unregister(dev);
2339
2340 return err;
2341 }
2342
2343 static void mce_device_remove(unsigned int cpu)
2344 {
2345 struct device *dev = per_cpu(mce_device, cpu);
2346 int i;
2347
2348 if (!cpumask_test_cpu(cpu, mce_device_initialized))
2349 return;
2350
2351 for (i = 0; mce_device_attrs[i]; i++)
2352 device_remove_file(dev, mce_device_attrs[i]);
2353
2354 for (i = 0; i < mca_cfg.banks; i++)
2355 device_remove_file(dev, &mce_banks[i].attr);
2356
2357 device_unregister(dev);
2358 cpumask_clear_cpu(cpu, mce_device_initialized);
2359 per_cpu(mce_device, cpu) = NULL;
2360 }
2361
2362 /* Make sure there are no machine checks on offlined CPUs. */
2363 static void mce_disable_cpu(void *h)
2364 {
2365 unsigned long action = *(unsigned long *)h;
2366 int i;
2367
2368 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2369 return;
2370
2371 if (!(action & CPU_TASKS_FROZEN))
2372 cmci_clear();
2373 for (i = 0; i < mca_cfg.banks; i++) {
2374 struct mce_bank *b = &mce_banks[i];
2375
2376 if (b->init)
2377 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2378 }
2379 }
2380
2381 static void mce_reenable_cpu(void *h)
2382 {
2383 unsigned long action = *(unsigned long *)h;
2384 int i;
2385
2386 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2387 return;
2388
2389 if (!(action & CPU_TASKS_FROZEN))
2390 cmci_reenable();
2391 for (i = 0; i < mca_cfg.banks; i++) {
2392 struct mce_bank *b = &mce_banks[i];
2393
2394 if (b->init)
2395 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
2396 }
2397 }
2398
2399 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
2400 static int
2401 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2402 {
2403 unsigned int cpu = (unsigned long)hcpu;
2404 struct timer_list *t = &per_cpu(mce_timer, cpu);
2405
2406 switch (action & ~CPU_TASKS_FROZEN) {
2407 case CPU_ONLINE:
2408 mce_device_create(cpu);
2409 if (threshold_cpu_callback)
2410 threshold_cpu_callback(action, cpu);
2411 break;
2412 case CPU_DEAD:
2413 if (threshold_cpu_callback)
2414 threshold_cpu_callback(action, cpu);
2415 mce_device_remove(cpu);
2416 mce_intel_hcpu_update(cpu);
2417
2418 /* intentionally ignoring frozen here */
2419 if (!(action & CPU_TASKS_FROZEN))
2420 cmci_rediscover();
2421 break;
2422 case CPU_DOWN_PREPARE:
2423 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2424 del_timer_sync(t);
2425 break;
2426 case CPU_DOWN_FAILED:
2427 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2428 mce_start_timer(cpu, t);
2429 break;
2430 }
2431
2432 return NOTIFY_OK;
2433 }
2434
2435 static struct notifier_block mce_cpu_notifier = {
2436 .notifier_call = mce_cpu_callback,
2437 };
2438
2439 static __init void mce_init_banks(void)
2440 {
2441 int i;
2442
2443 for (i = 0; i < mca_cfg.banks; i++) {
2444 struct mce_bank *b = &mce_banks[i];
2445 struct device_attribute *a = &b->attr;
2446
2447 sysfs_attr_init(&a->attr);
2448 a->attr.name = b->attrname;
2449 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2450
2451 a->attr.mode = 0644;
2452 a->show = show_bank;
2453 a->store = set_bank;
2454 }
2455 }
2456
2457 static __init int mcheck_init_device(void)
2458 {
2459 int err;
2460 int i = 0;
2461
2462 if (!mce_available(&boot_cpu_data)) {
2463 err = -EIO;
2464 goto err_out;
2465 }
2466
2467 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2468 err = -ENOMEM;
2469 goto err_out;
2470 }
2471
2472 mce_init_banks();
2473
2474 err = subsys_system_register(&mce_subsys, NULL);
2475 if (err)
2476 goto err_out_mem;
2477
2478 cpu_notifier_register_begin();
2479 for_each_online_cpu(i) {
2480 err = mce_device_create(i);
2481 if (err) {
2482 /*
2483 * Register notifier anyway (and do not unreg it) so
2484 * that we don't leave undeleted timers, see notifier
2485 * callback above.
2486 */
2487 __register_hotcpu_notifier(&mce_cpu_notifier);
2488 cpu_notifier_register_done();
2489 goto err_device_create;
2490 }
2491 }
2492
2493 __register_hotcpu_notifier(&mce_cpu_notifier);
2494 cpu_notifier_register_done();
2495
2496 register_syscore_ops(&mce_syscore_ops);
2497
2498 /* register character device /dev/mcelog */
2499 err = misc_register(&mce_chrdev_device);
2500 if (err)
2501 goto err_register;
2502
2503 return 0;
2504
2505 err_register:
2506 unregister_syscore_ops(&mce_syscore_ops);
2507
2508 err_device_create:
2509 /*
2510 * We didn't keep track of which devices were created above, but
2511 * even if we had, the set of online cpus might have changed.
2512 * Play safe and remove for every possible cpu, since
2513 * mce_device_remove() will do the right thing.
2514 */
2515 for_each_possible_cpu(i)
2516 mce_device_remove(i);
2517
2518 err_out_mem:
2519 free_cpumask_var(mce_device_initialized);
2520
2521 err_out:
2522 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
2523
2524 return err;
2525 }
2526 device_initcall_sync(mcheck_init_device);
2527
2528 /*
2529 * Old style boot options parsing. Only for compatibility.
2530 */
2531 static int __init mcheck_disable(char *str)
2532 {
2533 mca_cfg.disabled = true;
2534 return 1;
2535 }
2536 __setup("nomce", mcheck_disable);
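/*
 * Note: "nomce" is the legacy spelling and has the same effect as "mce=off";
 * both simply set mca_cfg.disabled.
 */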
2537
2538 #ifdef CONFIG_DEBUG_FS
2539 struct dentry *mce_get_debugfs_dir(void)
2540 {
2541 static struct dentry *dmce;
2542
2543 if (!dmce)
2544 dmce = debugfs_create_dir("mce", NULL);
2545
2546 return dmce;
2547 }
2548
2549 static void mce_reset(void)
2550 {
2551 cpu_missing = 0;
2552 atomic_set(&mce_fake_panicked, 0);
2553 atomic_set(&mce_executing, 0);
2554 atomic_set(&mce_callin, 0);
2555 atomic_set(&global_nwo, 0);
2556 }
2557
2558 static int fake_panic_get(void *data, u64 *val)
2559 {
2560 *val = fake_panic;
2561 return 0;
2562 }
2563
2564 static int fake_panic_set(void *data, u64 val)
2565 {
2566 mce_reset();
2567 fake_panic = val;
2568 return 0;
2569 }
2570
2571 DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2572 fake_panic_set, "%llu\n");
2573
2574 static int __init mcheck_debugfs_init(void)
2575 {
2576 struct dentry *dmce, *ffake_panic;
2577
2578 dmce = mce_get_debugfs_dir();
2579 if (!dmce)
2580 return -ENOMEM;
2581 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2582 &fake_panic_fops);
2583 if (!ffake_panic)
2584 return -ENOMEM;
2585
2586 return 0;
2587 }
2588 late_initcall(mcheck_debugfs_init);
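/*
 * Example usage (hypothetical mount point, assuming debugfs is mounted at
 * /sys/kernel/debug): the fake_panic control created above can be inspected
 * with
 *
 *   cat /sys/kernel/debug/mce/fake_panic
 *
 * Setting it goes through fake_panic_set(), which first resets the global
 * MCE rendezvous state via mce_reset(); note the file is created with mode
 * 0444 above.
 */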
2589 #endif