1 /*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/thread_info.h>
14 #include <linux/capability.h>
15 #include <linux/miscdevice.h>
16 #include <linux/ratelimit.h>
17 #include <linux/kallsyms.h>
18 #include <linux/rcupdate.h>
19 #include <linux/kobject.h>
20 #include <linux/uaccess.h>
21 #include <linux/kdebug.h>
22 #include <linux/kernel.h>
23 #include <linux/percpu.h>
24 #include <linux/string.h>
25 #include <linux/device.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/delay.h>
28 #include <linux/ctype.h>
29 #include <linux/sched.h>
30 #include <linux/sysfs.h>
31 #include <linux/types.h>
32 #include <linux/slab.h>
33 #include <linux/init.h>
34 #include <linux/kmod.h>
35 #include <linux/poll.h>
36 #include <linux/nmi.h>
37 #include <linux/cpu.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/mm.h>
41 #include <linux/debugfs.h>
42 #include <linux/irq_work.h>
43 #include <linux/export.h>
44
45 #include <asm/processor.h>
46 #include <asm/traps.h>
47 #include <asm/tlbflush.h>
48 #include <asm/mce.h>
49 #include <asm/msr.h>
50
51 #include "mce-internal.h"
52
53 static DEFINE_MUTEX(mce_chrdev_read_mutex);
54
55 #define rcu_dereference_check_mce(p) \
56 rcu_dereference_index_check((p), \
57 rcu_read_lock_sched_held() || \
58 lockdep_is_held(&mce_chrdev_read_mutex))
59
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/mce.h>
62
63 #define SPINUNIT 100 /* 100ns */
64
65 DEFINE_PER_CPU(unsigned, mce_exception_count);
66
67 struct mce_bank *mce_banks __read_mostly;
68
69 struct mca_config mca_cfg __read_mostly = {
70 .bootlog = -1,
71 /*
72 * Tolerant levels:
73 * 0: always panic on uncorrected errors, log corrected errors
74 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
75 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
76 * 3: never panic or SIGBUS, log all errors (for testing only)
77 */
78 .tolerant = 1,
79 .monarch_timeout = -1
80 };
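/*
 * Note on the defaults above: the -1 values mean "not set on the command
 * line"; __mcheck_cpu_apply_quirks() later fills in per-vendor defaults
 * (e.g. Intel gets a one second monarch_timeout, AMD disables bootlog).
 * Both tolerant and monarch_timeout can be overridden at boot with
 * "mce=TOLERANCELEVEL[,monarchtimeout]" as parsed by mcheck_enable()
 * below -- for example "mce=2,500000" selects tolerant level 2 and a
 * 0.5 second (500000 us) Monarch timeout -- or at run time via sysfs.
 */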
81
82 /* User mode helper program triggered by machine check event */
83 static unsigned long mce_need_notify;
84 static char mce_helper[128];
85 static char *mce_helper_argv[2] = { mce_helper, NULL };
86
87 static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
88
89 static DEFINE_PER_CPU(struct mce, mces_seen);
90 static int cpu_missing;
91
92 /* CMCI storm detection filter */
93 static DEFINE_PER_CPU(unsigned long, mce_polled_error);
94
95 /*
96 * MCA banks polled by the periodic polling timer for corrected events.
97 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
98 */
99 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
100 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
101 };
102
103 /*
104 * MCA banks controlled through firmware first for corrected errors.
105 * This is a global list of banks for which we won't enable CMCI and we
106 * won't poll. Firmware controls these banks and is responsible for
107 * reporting corrected errors through GHES. Uncorrected/recoverable
108 * errors are still notified through a machine check.
109 */
110 mce_banks_t mce_banks_ce_disabled;
111
112 static DEFINE_PER_CPU(struct work_struct, mce_work);
113
114 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
115
116 /*
117 * CPU/chipset specific EDAC code can register a notifier call here to print
118 * MCE errors in a human-readable form.
119 */
120 static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
121
122 /* Do initial initialization of a struct mce */
123 void mce_setup(struct mce *m)
124 {
125 memset(m, 0, sizeof(struct mce));
126 m->cpu = m->extcpu = smp_processor_id();
127 rdtscll(m->tsc);
128 /* We hope get_seconds stays lockless */
129 m->time = get_seconds();
130 m->cpuvendor = boot_cpu_data.x86_vendor;
131 m->cpuid = cpuid_eax(1);
132 m->socketid = cpu_data(m->extcpu).phys_proc_id;
133 m->apicid = cpu_data(m->extcpu).initial_apicid;
134 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
135 }
136
137 DEFINE_PER_CPU(struct mce, injectm);
138 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
139
140 /*
141 * Lockless MCE logging infrastructure.
142 * This avoids deadlocks on printk locks without having to break locks. It
143 * also separates MCEs from kernel messages to avoid bogus bug reports.
144 */
145
146 static struct mce_log mcelog = {
147 .signature = MCE_LOG_SIGNATURE,
148 .len = MCE_LOG_LEN,
149 .recordlen = sizeof(struct mce),
150 };
151
152 void mce_log(struct mce *mce)
153 {
154 unsigned next, entry;
155
156 /* Emit the trace record: */
157 trace_mce_record(mce);
158
159 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
160
161 mce->finished = 0;
162 wmb();
163 for (;;) {
164 entry = rcu_dereference_check_mce(mcelog.next);
165 for (;;) {
166
167 /*
168 * When the buffer fills up discard new entries.
169 * Assume that the earlier errors are the more
170 * interesting ones:
171 */
172 if (entry >= MCE_LOG_LEN) {
173 set_bit(MCE_OVERFLOW,
174 (unsigned long *)&mcelog.flags);
175 return;
176 }
177 /* Old left over entry. Skip: */
178 if (mcelog.entry[entry].finished) {
179 entry++;
180 continue;
181 }
182 break;
183 }
184 smp_rmb();
185 next = entry + 1;
186 if (cmpxchg(&mcelog.next, entry, next) == entry)
187 break;
188 }
189 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
190 wmb();
191 mcelog.entry[entry].finished = 1;
192 wmb();
193
194 mce->finished = 1;
195 set_bit(0, &mce_need_notify);
196 }
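/*
 * Example of the lockless protocol above: if two CPUs log concurrently,
 * both read mcelog.next == N but only one cmpxchg(N, N + 1) succeeds;
 * the loser re-reads next and claims slot N + 1 instead. Readers such
 * as mce_chrdev_read() and drain_mcelog_buffer() wait for the per-entry
 * "finished" flag before copying a slot out, so a half-written record
 * is never consumed.
 */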
197
198 static void drain_mcelog_buffer(void)
199 {
200 unsigned int next, i, prev = 0;
201
202 next = ACCESS_ONCE(mcelog.next);
203
204 do {
205 struct mce *m;
206
207 /* drain what was logged during boot */
208 for (i = prev; i < next; i++) {
209 unsigned long start = jiffies;
210 unsigned retries = 1;
211
212 m = &mcelog.entry[i];
213
214 while (!m->finished) {
215 if (time_after_eq(jiffies, start + 2*retries))
216 retries++;
217
218 cpu_relax();
219
220 if (!m->finished && retries >= 4) {
221 pr_err("skipping error being logged currently!\n");
222 break;
223 }
224 }
225 smp_rmb();
226 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
227 }
228
229 memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
230 prev = next;
231 next = cmpxchg(&mcelog.next, prev, 0);
232 } while (next != prev);
233 }
234
235
236 void mce_register_decode_chain(struct notifier_block *nb)
237 {
238 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
239 drain_mcelog_buffer();
240 }
241 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
242
243 void mce_unregister_decode_chain(struct notifier_block *nb)
244 {
245 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
246 }
247 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
248
249 static void print_mce(struct mce *m)
250 {
251 int ret = 0;
252
253 pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
254 m->extcpu, m->mcgstatus, m->bank, m->status);
255
256 if (m->ip) {
257 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
258 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
259 m->cs, m->ip);
260
261 if (m->cs == __KERNEL_CS)
262 print_symbol("{%s}", m->ip);
263 pr_cont("\n");
264 }
265
266 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
267 if (m->addr)
268 pr_cont("ADDR %llx ", m->addr);
269 if (m->misc)
270 pr_cont("MISC %llx ", m->misc);
271
272 pr_cont("\n");
273 /*
274 * Note this output is parsed by external tools and old fields
275 * should not be changed.
276 */
277 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
278 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
279 cpu_data(m->extcpu).microcode);
280
281 /*
282 * Print out human-readable details about the MCE error
283 * (if the CPU has an implementation for that).
284 */
285 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
286 if (ret == NOTIFY_STOP)
287 return;
288
289 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
290 }
291
292 #define PANIC_TIMEOUT 5 /* 5 seconds */
293
294 static atomic_t mce_panicked;
295
296 static int fake_panic;
297 static atomic_t mce_fake_panicked;
298
299 /* Panic in progress. Enable interrupts and wait for final IPI */
300 static void wait_for_panic(void)
301 {
302 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
303
304 preempt_disable();
305 local_irq_enable();
306 while (timeout-- > 0)
307 udelay(1);
308 if (panic_timeout == 0)
309 panic_timeout = mca_cfg.panic_timeout;
310 panic("Panicking machine check CPU died");
311 }
312
313 static void mce_panic(const char *msg, struct mce *final, char *exp)
314 {
315 int i, apei_err = 0;
316
317 if (!fake_panic) {
318 /*
319 * Make sure only one CPU runs in machine check panic
320 */
321 if (atomic_inc_return(&mce_panicked) > 1)
322 wait_for_panic();
323 barrier();
324
325 bust_spinlocks(1);
326 console_verbose();
327 } else {
328 /* Don't log too much for fake panic */
329 if (atomic_inc_return(&mce_fake_panicked) > 1)
330 return;
331 }
332 /* First print corrected ones that are still unlogged */
333 for (i = 0; i < MCE_LOG_LEN; i++) {
334 struct mce *m = &mcelog.entry[i];
335 if (!(m->status & MCI_STATUS_VAL))
336 continue;
337 if (!(m->status & MCI_STATUS_UC)) {
338 print_mce(m);
339 if (!apei_err)
340 apei_err = apei_write_mce(m);
341 }
342 }
343 /* Now print uncorrected but with the final one last */
344 for (i = 0; i < MCE_LOG_LEN; i++) {
345 struct mce *m = &mcelog.entry[i];
346 if (!(m->status & MCI_STATUS_VAL))
347 continue;
348 if (!(m->status & MCI_STATUS_UC))
349 continue;
350 if (!final || memcmp(m, final, sizeof(struct mce))) {
351 print_mce(m);
352 if (!apei_err)
353 apei_err = apei_write_mce(m);
354 }
355 }
356 if (final) {
357 print_mce(final);
358 if (!apei_err)
359 apei_err = apei_write_mce(final);
360 }
361 if (cpu_missing)
362 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
363 if (exp)
364 pr_emerg(HW_ERR "Machine check: %s\n", exp);
365 if (!fake_panic) {
366 if (panic_timeout == 0)
367 panic_timeout = mca_cfg.panic_timeout;
368 panic(msg);
369 } else
370 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
371 }
372
373 /* Support code for software error injection */
374
375 static int msr_to_offset(u32 msr)
376 {
377 unsigned bank = __this_cpu_read(injectm.bank);
378
379 if (msr == mca_cfg.rip_msr)
380 return offsetof(struct mce, ip);
381 if (msr == MSR_IA32_MCx_STATUS(bank))
382 return offsetof(struct mce, status);
383 if (msr == MSR_IA32_MCx_ADDR(bank))
384 return offsetof(struct mce, addr);
385 if (msr == MSR_IA32_MCx_MISC(bank))
386 return offsetof(struct mce, misc);
387 if (msr == MSR_IA32_MCG_STATUS)
388 return offsetof(struct mce, mcgstatus);
389 return -1;
390 }
391
392 /* MSR access wrappers used for error injection */
393 static u64 mce_rdmsrl(u32 msr)
394 {
395 u64 v;
396
397 if (__this_cpu_read(injectm.finished)) {
398 int offset = msr_to_offset(msr);
399
400 if (offset < 0)
401 return 0;
402 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
403 }
404
405 if (rdmsrl_safe(msr, &v)) {
406 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
407 /*
408 * Return zero in case the access faulted. This should
409 * not happen normally but can happen if the CPU does
410 * something weird, or if the code is buggy.
411 */
412 v = 0;
413 }
414
415 return v;
416 }
417
418 static void mce_wrmsrl(u32 msr, u64 v)
419 {
420 if (__this_cpu_read(injectm.finished)) {
421 int offset = msr_to_offset(msr);
422
423 if (offset >= 0)
424 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
425 return;
426 }
427 wrmsrl(msr, v);
428 }
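/*
 * In other words: while injectm.finished is set on this CPU, reads and
 * writes of the MCA MSRs are redirected into the per-CPU injectm record
 * via msr_to_offset(), so injected errors (e.g. from the mce-inject
 * module) flow through the normal handling paths without touching the
 * real hardware registers.
 */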
429
430 /*
431 * Collect all global (w.r.t. this processor) status about this machine
432 * check into our "mce" struct so that we can use it later to assess
433 * the severity of the problem as we read per-bank specific details.
434 */
435 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
436 {
437 mce_setup(m);
438
439 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
440 if (regs) {
441 /*
442 * Get the address of the instruction at the time of
443 * the machine check error.
444 */
445 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
446 m->ip = regs->ip;
447 m->cs = regs->cs;
448
449 /*
450 * When in VM86 mode make the cs look like ring 3
451 * always. This is a lie, but it's better than passing
452 * the additional vm86 bit around everywhere.
453 */
454 if (v8086_mode(regs))
455 m->cs |= 3;
456 }
457 /* Use accurate RIP reporting if available. */
458 if (mca_cfg.rip_msr)
459 m->ip = mce_rdmsrl(mca_cfg.rip_msr);
460 }
461 }
462
463 /*
464 * Simple lockless ring to communicate PFNs from the exception handler to the
465 * process context work function. This is vastly simplified because there's
466 * only a single reader and a single writer.
467 */
468 #define MCE_RING_SIZE 16 /* we use one entry less */
469
470 struct mce_ring {
471 unsigned short start;
472 unsigned short end;
473 unsigned long ring[MCE_RING_SIZE];
474 };
475 static DEFINE_PER_CPU(struct mce_ring, mce_ring);
476
477 /* Runs with CPU affinity in workqueue */
478 static int mce_ring_empty(void)
479 {
480 struct mce_ring *r = this_cpu_ptr(&mce_ring);
481
482 return r->start == r->end;
483 }
484
485 static int mce_ring_get(unsigned long *pfn)
486 {
487 struct mce_ring *r;
488 int ret = 0;
489
490 *pfn = 0;
491 get_cpu();
492 r = this_cpu_ptr(&mce_ring);
493 if (r->start == r->end)
494 goto out;
495 *pfn = r->ring[r->start];
496 r->start = (r->start + 1) % MCE_RING_SIZE;
497 ret = 1;
498 out:
499 put_cpu();
500 return ret;
501 }
502
503 /* Always runs in MCE context with preempt off */
504 static int mce_ring_add(unsigned long pfn)
505 {
506 struct mce_ring *r = this_cpu_ptr(&mce_ring);
507 unsigned next;
508
509 next = (r->end + 1) % MCE_RING_SIZE;
510 if (next == r->start)
511 return -1;
512 r->ring[r->end] = pfn;
513 wmb();
514 r->end = next;
515 return 0;
516 }
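/*
 * Example: with MCE_RING_SIZE of 16 at most 15 PFNs can be queued,
 * because one slot is intentionally left unused ("one entry less") so
 * that start == end always means empty while (end + 1) % size == start
 * always means full.
 */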
517
518 int mce_available(struct cpuinfo_x86 *c)
519 {
520 if (mca_cfg.disabled)
521 return 0;
522 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
523 }
524
525 static void mce_schedule_work(void)
526 {
527 if (!mce_ring_empty())
528 schedule_work(this_cpu_ptr(&mce_work));
529 }
530
531 static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
532
533 static void mce_irq_work_cb(struct irq_work *entry)
534 {
535 mce_notify_irq();
536 mce_schedule_work();
537 }
538
539 static void mce_report_event(struct pt_regs *regs)
540 {
541 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
542 mce_notify_irq();
543 /*
544 * Triggering the work queue here is just an insurance
545 * policy in case the syscall exit notify handler
546 * doesn't run soon enough or ends up running on the
547 * wrong CPU (can happen when audit sleeps)
548 */
549 mce_schedule_work();
550 return;
551 }
552
553 irq_work_queue(this_cpu_ptr(&mce_irq_work));
554 }
555
556 /*
557 * Read ADDR and MISC registers.
558 */
559 static void mce_read_aux(struct mce *m, int i)
560 {
561 if (m->status & MCI_STATUS_MISCV)
562 m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
563 if (m->status & MCI_STATUS_ADDRV) {
564 m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
565
566 /*
567 * Mask the reported address by the reported granularity.
568 */
569 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
570 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
571 m->addr >>= shift;
572 m->addr <<= shift;
573 }
574 }
575 }
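/*
 * Example of the masking above: if MCi_MISC reports an address LSB of 6
 * (the error is only accurate to a 64-byte cache line), the shift pair
 * clears the low 6 bits of m->addr so that consumers never interpret
 * the undefined low bits as meaningful.
 */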
576
577 static bool memory_error(struct mce *m)
578 {
579 struct cpuinfo_x86 *c = &boot_cpu_data;
580
581 if (c->x86_vendor == X86_VENDOR_AMD) {
582 /*
583 * coming soon
584 */
585 return false;
586 } else if (c->x86_vendor == X86_VENDOR_INTEL) {
587 /*
588 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
589 *
590 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
591 * indicating a memory error. Bit 8 is used for indicating a
592 * cache hierarchy error. The combination of bit 2 and bit 3
593 * is used for indicating a `generic' cache hierarchy error.
594 * But we can't just blindly check the above bits, because if
595 * bit 11 is set, then it is a bus/interconnect error - and
596 * either way the above bits just give more detail on what
597 * bus/interconnect error happened. Note that bit 12 can be
598 * ignored, as it's the "filter" bit.
599 */
600 return (m->status & 0xef80) == BIT(7) ||
601 (m->status & 0xef00) == BIT(8) ||
602 (m->status & 0xeffc) == 0xc;
603 }
604
605 return false;
606 }
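/*
 * Illustration of the checks above: an MCACOD of 0x0090 (bit 7 set,
 * bits 8-11 clear) passes the first test and counts as a memory error,
 * 0x0110 (bit 8 set, bits 9-11 clear) passes the second as a cache
 * hierarchy error, and 0x000c (bits 2 and 3 set) passes the third as a
 * generic cache error, while any code with bit 11 set is a
 * bus/interconnect error and is rejected by all three tests.
 */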
607
608 DEFINE_PER_CPU(unsigned, mce_poll_count);
609
610 /*
611 * Poll for corrected events or events that happened before reset.
612 * Those are just logged through /dev/mcelog.
613 *
614 * This is executed in standard interrupt context.
615 *
616 * Note: the spec recommends panicking for fatal unsignalled
617 * errors here. However this would be quite problematic --
618 * we would need to reimplement the Monarch handling and
619 * it would mess up the exclusion between the exception handler
620 * and the poll handler -- so we skip this for now.
621 * These cases should not happen anyway, or only when the CPU
622 * is already totally confused. In this case it's likely it will
623 * not fully execute the machine check handler either.
624 */
625 void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
626 {
627 struct mce m;
628 int severity;
629 int i;
630
631 this_cpu_inc(mce_poll_count);
632
633 mce_gather_info(&m, NULL);
634
635 for (i = 0; i < mca_cfg.banks; i++) {
636 if (!mce_banks[i].ctl || !test_bit(i, *b))
637 continue;
638
639 m.misc = 0;
640 m.addr = 0;
641 m.bank = i;
642 m.tsc = 0;
643
644 barrier();
645 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
646 if (!(m.status & MCI_STATUS_VAL))
647 continue;
648
649 this_cpu_write(mce_polled_error, 1);
650 /*
651 * Uncorrected or signalled events are handled by the exception
652 * handler when it is enabled, so don't process those here.
653 *
654 * TBD do the same check for MCI_STATUS_EN here?
655 */
656 if (!(flags & MCP_UC) &&
657 (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
658 continue;
659
660 mce_read_aux(&m, i);
661
662 if (!(flags & MCP_TIMESTAMP))
663 m.tsc = 0;
664
665 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
666
667 /*
668 * In the cases where we don't have a valid address after all,
669 * do not add it into the ring buffer.
670 */
671 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
672 if (m.status & MCI_STATUS_ADDRV) {
673 mce_ring_add(m.addr >> PAGE_SHIFT);
674 mce_schedule_work();
675 }
676 }
677
678 /*
679 * Don't get the IP here because it's unlikely to
680 * have anything to do with the actual error location.
681 */
682 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
683 mce_log(&m);
684
685 /*
686 * Clear state for this bank.
687 */
688 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
689 }
690
691 /*
692 * Don't clear MCG_STATUS here because it's only defined for
693 * exceptions.
694 */
695
696 sync_core();
697 }
698 EXPORT_SYMBOL_GPL(machine_check_poll);
699
700 /*
701 * Do a quick check if any of the events requires a panic.
702 * This decides if we keep the events around or clear them.
703 */
704 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
705 struct pt_regs *regs)
706 {
707 int i, ret = 0;
708
709 for (i = 0; i < mca_cfg.banks; i++) {
710 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
711 if (m->status & MCI_STATUS_VAL) {
712 __set_bit(i, validp);
713 if (quirk_no_way_out)
714 quirk_no_way_out(i, m, regs);
715 }
716 if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
717 MCE_PANIC_SEVERITY)
718 ret = 1;
719 }
720 return ret;
721 }
722
723 /*
724 * Variable to establish order between CPUs while scanning.
725 * Each CPU spins initially until executing is equal its number.
726 */
727 static atomic_t mce_executing;
728
729 /*
730 * Defines order of CPUs on entry. First CPU becomes Monarch.
731 */
732 static atomic_t mce_callin;
733
734 /*
735 * Check if a timeout waiting for other CPUs happened.
736 */
737 static int mce_timed_out(u64 *t, const char *msg)
738 {
739 /*
740 * The others already did panic for some reason.
741 * Bail out like in a timeout.
742 * rmb() to tell the compiler that system_state
743 * might have been modified by someone else.
744 */
745 rmb();
746 if (atomic_read(&mce_panicked))
747 wait_for_panic();
748 if (!mca_cfg.monarch_timeout)
749 goto out;
750 if ((s64)*t < SPINUNIT) {
751 if (mca_cfg.tolerant <= 1)
752 mce_panic(msg, NULL, NULL);
753 cpu_missing = 1;
754 return 1;
755 }
756 *t -= SPINUNIT;
757 out:
758 touch_nmi_watchdog();
759 return 0;
760 }
761
762 /*
763 * The Monarch's reign. The Monarch is the CPU who entered
764 * the machine check handler first. It waits for the others to
765 * raise the exception too and then grades them. If any
766 * error is fatal, it panics. Only then does it let the others continue.
767 *
768 * The other CPUs entering the MCE handler will be controlled by the
769 * Monarch. They are called Subjects.
770 *
771 * This way we prevent any potential data corruption in an unrecoverable case
772 * and also make sure that all CPUs' errors are always examined.
773 *
774 * Also this detects the case of a machine check event coming from outer
775 * space (not detected by any CPU). In this case some external agent wants
776 * us to shut down, so panic too.
777 *
778 * The other CPUs might still decide to panic if the handler happens
779 * in an unrecoverable place, but in this case the system is in a semi-stable
780 * state and won't corrupt anything by itself. It's ok to let the others
781 * continue for a bit first.
782 *
783 * All the spin loops have timeouts; when a timeout happens a CPU
784 * typically elects itself to be Monarch.
785 */
786 static void mce_reign(void)
787 {
788 int cpu;
789 struct mce *m = NULL;
790 int global_worst = 0;
791 char *msg = NULL;
792 char *nmsg = NULL;
793
794 /*
795 * This CPU is the Monarch and the other CPUs have run
796 * through their handlers.
797 * Grade the severity of the errors of all the CPUs.
798 */
799 for_each_possible_cpu(cpu) {
800 int severity = mce_severity(&per_cpu(mces_seen, cpu),
801 mca_cfg.tolerant,
802 &nmsg, true);
803 if (severity > global_worst) {
804 msg = nmsg;
805 global_worst = severity;
806 m = &per_cpu(mces_seen, cpu);
807 }
808 }
809
810 /*
811 * Cannot recover? Panic here then.
812 * This dumps all the mces in the log buffer and stops the
813 * other CPUs.
814 */
815 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
816 mce_panic("Fatal Machine check", m, msg);
817
818 /*
819 * For UC somewhere we let the CPU who detects it handle it.
820 * Also must let continue the others, otherwise the handling
821 * CPU could deadlock on a lock.
822 */
823
824 /*
825 * No machine check event found. Must be some external
826 * source or one CPU is hung. Panic.
827 */
828 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
829 mce_panic("Machine check from unknown source", NULL, NULL);
830
831 /*
832 * Now clear all the mces_seen so that they don't reappear on
833 * the next mce.
834 */
835 for_each_possible_cpu(cpu)
836 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
837 }
838
839 static atomic_t global_nwo;
840
841 /*
842 * Start of Monarch synchronization. This waits until all CPUs have
843 * entered the exception handler and then determines if any of them
844 * saw a fatal event that requires panic. Then it executes them
845 * in the entry order.
846 * TBD double check parallel CPU hotunplug
847 */
848 static int mce_start(int *no_way_out)
849 {
850 int order;
851 int cpus = num_online_cpus();
852 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
853
854 if (!timeout)
855 return -1;
856
857 atomic_add(*no_way_out, &global_nwo);
858 /*
859 * global_nwo should be updated before mce_callin
860 */
861 smp_wmb();
862 order = atomic_inc_return(&mce_callin);
863
864 /*
865 * Wait for everyone.
866 */
867 while (atomic_read(&mce_callin) != cpus) {
868 if (mce_timed_out(&timeout,
869 "Timeout: Not all CPUs entered broadcast exception handler")) {
870 atomic_set(&global_nwo, 0);
871 return -1;
872 }
873 ndelay(SPINUNIT);
874 }
875
876 /*
877 * mce_callin should be read before global_nwo
878 */
879 smp_rmb();
880
881 if (order == 1) {
882 /*
883 * Monarch: Starts executing now, the others wait.
884 */
885 atomic_set(&mce_executing, 1);
886 } else {
887 /*
888 * Subject: Now start the scanning loop one by one in
889 * the original callin order.
890 * This way when there are any shared banks it will be
891 * only seen by one CPU before cleared, avoiding duplicates.
892 */
893 while (atomic_read(&mce_executing) < order) {
894 if (mce_timed_out(&timeout,
895 "Timeout: Subject CPUs unable to finish machine check processing")) {
896 atomic_set(&global_nwo, 0);
897 return -1;
898 }
899 ndelay(SPINUNIT);
900 }
901 }
902
903 /*
904 * Cache the global no_way_out state.
905 */
906 *no_way_out = atomic_read(&global_nwo);
907
908 return order;
909 }
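/*
 * Example with four CPUs entering the broadcast exception: the first
 * caller of atomic_inc_return(&mce_callin) gets order == 1 and becomes
 * the Monarch; the others get 2..4 and spin until mce_executing reaches
 * their own order. The per-bank scanning in do_machine_check() therefore
 * runs strictly in callin order, so shared banks are seen (and cleared)
 * by a single CPU only.
 */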
910
911 /*
912 * Synchronize between CPUs after main scanning loop.
913 * This invokes the bulk of the Monarch processing.
914 */
915 static int mce_end(int order)
916 {
917 int ret = -1;
918 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
919
920 if (!timeout)
921 goto reset;
922 if (order < 0)
923 goto reset;
924
925 /*
926 * Allow others to run.
927 */
928 atomic_inc(&mce_executing);
929
930 if (order == 1) {
931 /* CHECKME: Can this race with a parallel hotplug? */
932 int cpus = num_online_cpus();
933
934 /*
935 * Monarch: Wait for everyone to go through their scanning
936 * loops.
937 */
938 while (atomic_read(&mce_executing) <= cpus) {
939 if (mce_timed_out(&timeout,
940 "Timeout: Monarch CPU unable to finish machine check processing"))
941 goto reset;
942 ndelay(SPINUNIT);
943 }
944
945 mce_reign();
946 barrier();
947 ret = 0;
948 } else {
949 /*
950 * Subject: Wait for Monarch to finish.
951 */
952 while (atomic_read(&mce_executing) != 0) {
953 if (mce_timed_out(&timeout,
954 "Timeout: Monarch CPU did not finish machine check processing"))
955 goto reset;
956 ndelay(SPINUNIT);
957 }
958
959 /*
960 * Don't reset anything. That's done by the Monarch.
961 */
962 return 0;
963 }
964
965 /*
966 * Reset all global state.
967 */
968 reset:
969 atomic_set(&global_nwo, 0);
970 atomic_set(&mce_callin, 0);
971 barrier();
972
973 /*
974 * Let others run again.
975 */
976 atomic_set(&mce_executing, 0);
977 return ret;
978 }
979
980 /*
981 * Check if the address reported by the CPU is in a format we can parse.
982 * It would be possible to add code for most other cases, but all would
983 * be somewhat complicated (e.g. segment offset would require an instruction
984 * parser). So only support physical addresses up to page granularity for now.
985 */
986 static int mce_usable_address(struct mce *m)
987 {
988 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
989 return 0;
990 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
991 return 0;
992 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
993 return 0;
994 return 1;
995 }
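/*
 * When all three checks pass, m->addr holds a plain physical address
 * valid at least to page granularity, so callers can hand
 * m->addr >> PAGE_SHIFT directly to the PFN ring and memory_failure(),
 * as do_machine_check() does for action-optional errors.
 */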
996
997 static void mce_clear_state(unsigned long *toclear)
998 {
999 int i;
1000
1001 for (i = 0; i < mca_cfg.banks; i++) {
1002 if (test_bit(i, toclear))
1003 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1004 }
1005 }
1006
1007 /*
1008 * The actual machine check handler. This only handles real
1009 * exceptions when something got corrupted coming in through int 18.
1010 *
1011 * This is executed in NMI context not subject to normal locking rules. This
1012 * implies that most kernel services cannot be safely used. Don't even
1013 * think about putting a printk in there!
1014 *
1015 * On Intel systems this is entered on all CPUs in parallel through
1016 * MCE broadcast. However some CPUs might be broken beyond repair,
1017 * so always be careful when synchronizing with others.
1018 */
1019 void do_machine_check(struct pt_regs *regs, long error_code)
1020 {
1021 struct mca_config *cfg = &mca_cfg;
1022 struct mce m, *final;
1023 enum ctx_state prev_state;
1024 int i;
1025 int worst = 0;
1026 int severity;
1027 /*
1028 * Establish sequential order between the CPUs entering the machine
1029 * check handler.
1030 */
1031 int order;
1032 /*
1033 * If no_way_out gets set, there is no safe way to recover from this
1034 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1035 */
1036 int no_way_out = 0;
1037 /*
1038 * If kill_it gets set, there might be a way to recover from this
1039 * error.
1040 */
1041 int kill_it = 0;
1042 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1043 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1044 char *msg = "Unknown";
1045 u64 recover_paddr = ~0ull;
1046 int flags = MF_ACTION_REQUIRED;
1047
1048 prev_state = ist_enter(regs);
1049
1050 this_cpu_inc(mce_exception_count);
1051
1052 if (!cfg->banks)
1053 goto out;
1054
1055 mce_gather_info(&m, regs);
1056
1057 final = this_cpu_ptr(&mces_seen);
1058 *final = m;
1059
1060 memset(valid_banks, 0, sizeof(valid_banks));
1061 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1062
1063 barrier();
1064
1065 /*
1066 * If there is no restart IP we might need to kill or panic.
1067 * Assume the worst for now, but if we find the
1068 * severity is MCE_AR_SEVERITY we have other options.
1069 */
1070 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1071 kill_it = 1;
1072
1073 /*
1074 * Go through all the banks in exclusion of the other CPUs.
1075 * This way we don't report duplicated events on shared banks
1076 * because the first one to see it will clear it.
1077 */
1078 order = mce_start(&no_way_out);
1079 for (i = 0; i < cfg->banks; i++) {
1080 __clear_bit(i, toclear);
1081 if (!test_bit(i, valid_banks))
1082 continue;
1083 if (!mce_banks[i].ctl)
1084 continue;
1085
1086 m.misc = 0;
1087 m.addr = 0;
1088 m.bank = i;
1089
1090 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1091 if ((m.status & MCI_STATUS_VAL) == 0)
1092 continue;
1093
1094 /*
1095 * Errors that are neither uncorrected nor signaled are handled
1096 * by machine_check_poll(). Leave them alone, unless this panics.
1097 */
1098 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1099 !no_way_out)
1100 continue;
1101
1102 /*
1103 * Set taint even when machine check was not enabled.
1104 */
1105 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1106
1107 severity = mce_severity(&m, cfg->tolerant, NULL, true);
1108
1109 /*
1110 * When the machine check was for a corrected/deferred error, don't
1111 * touch it, unless we're panicking.
1112 */
1113 if ((severity == MCE_KEEP_SEVERITY ||
1114 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1115 continue;
1116 __set_bit(i, toclear);
1117 if (severity == MCE_NO_SEVERITY) {
1118 /*
1119 * Machine check event was not enabled. Clear, but
1120 * ignore.
1121 */
1122 continue;
1123 }
1124
1125 mce_read_aux(&m, i);
1126
1127 /*
1128 * Action optional error. Queue address for later processing.
1129 * When the ring overflows we just ignore the AO error.
1130 * RED-PEN add some logging mechanism when
1131 * mce_usable_address() or mce_ring_add() fails.
1132 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
1133 */
1134 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1135 mce_ring_add(m.addr >> PAGE_SHIFT);
1136
1137 mce_log(&m);
1138
1139 if (severity > worst) {
1140 *final = m;
1141 worst = severity;
1142 }
1143 }
1144
1145 /* mce_clear_state will clear *final, save locally for use later */
1146 m = *final;
1147
1148 if (!no_way_out)
1149 mce_clear_state(toclear);
1150
1151 /*
1152 * Do most of the synchronization with other CPUs.
1153 * When there's any problem use only local no_way_out state.
1154 */
1155 if (mce_end(order) < 0)
1156 no_way_out = worst >= MCE_PANIC_SEVERITY;
1157
1158 /*
1159 * At insane "tolerant" levels we take no action. Otherwise
1160 * we only die if we have no other choice. For less serious
1161 * issues we try to recover, or limit damage to the current
1162 * process.
1163 */
1164 if (cfg->tolerant < 3) {
1165 if (no_way_out)
1166 mce_panic("Fatal machine check on current CPU", &m, msg);
1167 if (worst == MCE_AR_SEVERITY) {
1168 recover_paddr = m.addr;
1169 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1170 flags |= MF_MUST_KILL;
1171 } else if (kill_it) {
1172 force_sig(SIGBUS, current);
1173 }
1174 }
1175
1176 if (worst > 0)
1177 mce_report_event(regs);
1178 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1179 out:
1180 sync_core();
1181
1182 if (recover_paddr == ~0ull)
1183 goto done;
1184
1185 pr_err("Uncorrected hardware memory error in user-access at %llx",
1186 recover_paddr);
1187 /*
1188 * We must call memory_failure() here even if the current process is
1189 * doomed. We still need to mark the page as poisoned and alert any
1190 * other users of the page.
1191 */
1192 ist_begin_non_atomic(regs);
1193 local_irq_enable();
1194 if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
1195 pr_err("Memory error not recovered");
1196 force_sig(SIGBUS, current);
1197 }
1198 local_irq_disable();
1199 ist_end_non_atomic();
1200 done:
1201 ist_exit(regs, prev_state);
1202 }
1203 EXPORT_SYMBOL_GPL(do_machine_check);
1204
1205 #ifndef CONFIG_MEMORY_FAILURE
1206 int memory_failure(unsigned long pfn, int vector, int flags)
1207 {
1208 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1209 BUG_ON(flags & MF_ACTION_REQUIRED);
1210 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1211 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1212 pfn);
1213
1214 return 0;
1215 }
1216 #endif
1217
1218 /*
1219 * Action optional processing happens here (picking up
1220 * from the list of faulting pages that do_machine_check()
1221 * placed into the "ring").
1222 */
1223 static void mce_process_work(struct work_struct *dummy)
1224 {
1225 unsigned long pfn;
1226
1227 while (mce_ring_get(&pfn))
1228 memory_failure(pfn, MCE_VECTOR, 0);
1229 }
1230
1231 #ifdef CONFIG_X86_MCE_INTEL
1232 /**
1233 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1234 * @status: Event status information
1236 *
1237 * This function should be called by the thermal interrupt after the
1238 * event has been processed and the decision was made to log the event
1239 * further.
1240 *
1241 * The status parameter will be saved to the 'status' field of 'struct mce'
1242 * and historically has been the register value of the
1243 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1244 */
1245 void mce_log_therm_throt_event(__u64 status)
1246 {
1247 struct mce m;
1248
1249 mce_setup(&m);
1250 m.bank = MCE_THERMAL_BANK;
1251 m.status = status;
1252 mce_log(&m);
1253 }
1254 #endif /* CONFIG_X86_MCE_INTEL */
1255
1256 /*
1257 * Periodic polling timer for "silent" machine check errors. If the
1258 * poller finds an MCE, poll 2x faster. When the poller finds no more
1259 * errors, poll 2x slower (up to check_interval seconds).
1260 */
1261 static unsigned long check_interval = 5 * 60; /* 5 minutes */
1262
1263 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1264 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1265
1266 static unsigned long mce_adjust_timer_default(unsigned long interval)
1267 {
1268 return interval;
1269 }
1270
1271 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
1272 mce_adjust_timer_default;
1273
1274 static int cmc_error_seen(void)
1275 {
1276 unsigned long *v = this_cpu_ptr(&mce_polled_error);
1277
1278 return test_and_clear_bit(0, v);
1279 }
1280
1281 static void mce_timer_fn(unsigned long data)
1282 {
1283 struct timer_list *t = this_cpu_ptr(&mce_timer);
1284 unsigned long iv;
1285 int notify;
1286
1287 WARN_ON(smp_processor_id() != data);
1288
1289 if (mce_available(this_cpu_ptr(&cpu_info))) {
1290 machine_check_poll(MCP_TIMESTAMP,
1291 this_cpu_ptr(&mce_poll_banks));
1292 mce_intel_cmci_poll();
1293 }
1294
1295 /*
1296 * Alert userspace if needed. If we logged an MCE, reduce the
1297 * polling interval, otherwise increase the polling interval.
1298 */
1299 iv = __this_cpu_read(mce_next_interval);
1300 notify = mce_notify_irq();
1301 notify |= cmc_error_seen();
1302 if (notify) {
1303 iv = max(iv / 2, (unsigned long) HZ/100);
1304 } else {
1305 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1306 iv = mce_adjust_timer(iv);
1307 }
1308 __this_cpu_write(mce_next_interval, iv);
1309 /* Might have become 0 after CMCI storm subsided */
1310 if (iv) {
1311 t->expires = jiffies + iv;
1312 add_timer_on(t, smp_processor_id());
1313 }
1314 }
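/*
 * Example of the adaptive interval: with check_interval at its default
 * of 5 minutes, every round that saw an event halves the interval (down
 * to a floor of HZ/100 jiffies, i.e. about 10ms) and every quiet round
 * doubles it again, capped at check_interval seconds.
 */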
1315
1316 /*
1317 * Ensure that the timer is firing in @interval from now.
1318 */
1319 void mce_timer_kick(unsigned long interval)
1320 {
1321 struct timer_list *t = this_cpu_ptr(&mce_timer);
1322 unsigned long when = jiffies + interval;
1323 unsigned long iv = __this_cpu_read(mce_next_interval);
1324
1325 if (timer_pending(t)) {
1326 if (time_before(when, t->expires))
1327 mod_timer_pinned(t, when);
1328 } else {
1329 t->expires = round_jiffies(when);
1330 add_timer_on(t, smp_processor_id());
1331 }
1332 if (interval < iv)
1333 __this_cpu_write(mce_next_interval, interval);
1334 }
1335
1336 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1337 static void mce_timer_delete_all(void)
1338 {
1339 int cpu;
1340
1341 for_each_online_cpu(cpu)
1342 del_timer_sync(&per_cpu(mce_timer, cpu));
1343 }
1344
1345 static void mce_do_trigger(struct work_struct *work)
1346 {
1347 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
1348 }
1349
1350 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1351
1352 /*
1353 * Notify the user(s) about new machine check events.
1354 * Can be called from interrupt context, but not from machine check/NMI
1355 * context.
1356 */
1357 int mce_notify_irq(void)
1358 {
1359 /* Not more than two messages every minute */
1360 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1361
1362 if (test_and_clear_bit(0, &mce_need_notify)) {
1363 /* wake processes polling /dev/mcelog */
1364 wake_up_interruptible(&mce_chrdev_wait);
1365
1366 if (mce_helper[0])
1367 schedule_work(&mce_trigger_work);
1368
1369 if (__ratelimit(&ratelimit))
1370 pr_info(HW_ERR "Machine check events logged\n");
1371
1372 return 1;
1373 }
1374 return 0;
1375 }
1376 EXPORT_SYMBOL_GPL(mce_notify_irq);
1377
1378 static int __mcheck_cpu_mce_banks_init(void)
1379 {
1380 int i;
1381 u8 num_banks = mca_cfg.banks;
1382
1383 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1384 if (!mce_banks)
1385 return -ENOMEM;
1386
1387 for (i = 0; i < num_banks; i++) {
1388 struct mce_bank *b = &mce_banks[i];
1389
1390 b->ctl = -1ULL;
1391 b->init = 1;
1392 }
1393 return 0;
1394 }
1395
1396 /*
1397 * Initialize Machine Checks for a CPU.
1398 */
1399 static int __mcheck_cpu_cap_init(void)
1400 {
1401 unsigned b;
1402 u64 cap;
1403
1404 rdmsrl(MSR_IA32_MCG_CAP, cap);
1405
1406 b = cap & MCG_BANKCNT_MASK;
1407 if (!mca_cfg.banks)
1408 pr_info("CPU supports %d MCE banks\n", b);
1409
1410 if (b > MAX_NR_BANKS) {
1411 pr_warn("Using only %u machine check banks out of %u\n",
1412 MAX_NR_BANKS, b);
1413 b = MAX_NR_BANKS;
1414 }
1415
1416 /* Don't support asymmetric configurations today */
1417 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1418 mca_cfg.banks = b;
1419
1420 if (!mce_banks) {
1421 int err = __mcheck_cpu_mce_banks_init();
1422
1423 if (err)
1424 return err;
1425 }
1426
1427 /* Use accurate RIP reporting if available. */
1428 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1429 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1430
1431 if (cap & MCG_SER_P)
1432 mca_cfg.ser = true;
1433
1434 return 0;
1435 }
1436
1437 static void __mcheck_cpu_init_generic(void)
1438 {
1439 enum mcp_flags m_fl = 0;
1440 mce_banks_t all_banks;
1441 u64 cap;
1442 int i;
1443
1444 if (!mca_cfg.bootlog)
1445 m_fl = MCP_DONTLOG;
1446
1447 /*
1448 * Log the machine checks left over from the previous reset.
1449 */
1450 bitmap_fill(all_banks, MAX_NR_BANKS);
1451 machine_check_poll(MCP_UC | m_fl, &all_banks);
1452
1453 cr4_set_bits(X86_CR4_MCE);
1454
1455 rdmsrl(MSR_IA32_MCG_CAP, cap);
1456 if (cap & MCG_CTL_P)
1457 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1458
1459 for (i = 0; i < mca_cfg.banks; i++) {
1460 struct mce_bank *b = &mce_banks[i];
1461
1462 if (!b->init)
1463 continue;
1464 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1465 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1466 }
1467 }
1468
1469 /*
1470 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1471 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1472 * Vol 3B Table 15-20). But this confuses both the code that determines
1473 * whether the machine check occurred in kernel or user mode, and also
1474 * the severity assessment code. Pretend that EIPV was set, and take the
1475 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1476 */
1477 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1478 {
1479 if (bank != 0)
1480 return;
1481 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1482 return;
1483 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1484 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1485 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1486 MCACOD)) !=
1487 (MCI_STATUS_UC|MCI_STATUS_EN|
1488 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1489 MCI_STATUS_AR|MCACOD_INSTR))
1490 return;
1491
1492 m->mcgstatus |= MCG_STATUS_EIPV;
1493 m->ip = regs->ip;
1494 m->cs = regs->cs;
1495 }
1496
1497 /* Add per CPU specific workarounds here */
1498 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1499 {
1500 struct mca_config *cfg = &mca_cfg;
1501
1502 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1503 pr_info("unknown CPU type - not enabling MCE support\n");
1504 return -EOPNOTSUPP;
1505 }
1506
1507 /* This should be disabled by the BIOS, but isn't always */
1508 if (c->x86_vendor == X86_VENDOR_AMD) {
1509 if (c->x86 == 15 && cfg->banks > 4) {
1510 /*
1511 * disable GART TBL walk error reporting, which
1512 * trips off incorrectly with the IOMMU & 3ware
1513 * & Cerberus:
1514 */
1515 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1516 }
1517 if (c->x86 <= 17 && cfg->bootlog < 0) {
1518 /*
1519 * Lots of broken BIOSes around that don't clear them
1520 * by default and leave crap in there. Don't log:
1521 */
1522 cfg->bootlog = 0;
1523 }
1524 /*
1525 * Various K7s with broken bank 0 around. Always disable
1526 * by default.
1527 */
1528 if (c->x86 == 6 && cfg->banks > 0)
1529 mce_banks[0].ctl = 0;
1530
1531 /*
1532 * Turn off MC4_MISC thresholding banks on those models since
1533 * they're not supported there.
1534 */
1535 if (c->x86 == 0x15 &&
1536 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1537 int i;
1538 u64 val, hwcr;
1539 bool need_toggle;
1540 u32 msrs[] = {
1541 0x00000413, /* MC4_MISC0 */
1542 0xc0000408, /* MC4_MISC1 */
1543 };
1544
1545 rdmsrl(MSR_K7_HWCR, hwcr);
1546
1547 /* McStatusWrEn has to be set */
1548 need_toggle = !(hwcr & BIT(18));
1549
1550 if (need_toggle)
1551 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1552
1553 for (i = 0; i < ARRAY_SIZE(msrs); i++) {
1554 rdmsrl(msrs[i], val);
1555
1556 /* CntP bit set? */
1557 if (val & BIT_64(62)) {
1558 val &= ~BIT_64(62);
1559 wrmsrl(msrs[i], val);
1560 }
1561 }
1562
1563 /* restore old settings */
1564 if (need_toggle)
1565 wrmsrl(MSR_K7_HWCR, hwcr);
1566 }
1567 }
1568
1569 if (c->x86_vendor == X86_VENDOR_INTEL) {
1570 /*
1571 * SDM documents that on family 6 bank 0 should not be written
1572 * because it aliases to another special BIOS controlled
1573 * register.
1574 * But it's not aliased anymore on model 0x1a+.
1575 * Don't ignore bank 0 completely because there could be a
1576 * valid event later, merely don't write CTL0.
1577 */
1578
1579 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1580 mce_banks[0].init = 0;
1581
1582 /*
1583 * All newer Intel systems support MCE broadcasting. Enable
1584 * synchronization with a one second timeout.
1585 */
1586 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1587 cfg->monarch_timeout < 0)
1588 cfg->monarch_timeout = USEC_PER_SEC;
1589
1590 /*
1591 * There are also broken BIOSes on some Pentium M and
1592 * earlier systems:
1593 */
1594 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1595 cfg->bootlog = 0;
1596
1597 if (c->x86 == 6 && c->x86_model == 45)
1598 quirk_no_way_out = quirk_sandybridge_ifu;
1599 }
1600 if (cfg->monarch_timeout < 0)
1601 cfg->monarch_timeout = 0;
1602 if (cfg->bootlog != 0)
1603 cfg->panic_timeout = 30;
1604
1605 return 0;
1606 }
1607
1608 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1609 {
1610 if (c->x86 != 5)
1611 return 0;
1612
1613 switch (c->x86_vendor) {
1614 case X86_VENDOR_INTEL:
1615 intel_p5_mcheck_init(c);
1616 return 1;
1617 break;
1618 case X86_VENDOR_CENTAUR:
1619 winchip_mcheck_init(c);
1620 return 1;
1621 break;
1622 }
1623
1624 return 0;
1625 }
1626
1627 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1628 {
1629 switch (c->x86_vendor) {
1630 case X86_VENDOR_INTEL:
1631 mce_intel_feature_init(c);
1632 mce_adjust_timer = mce_intel_adjust_timer;
1633 break;
1634 case X86_VENDOR_AMD:
1635 mce_amd_feature_init(c);
1636 break;
1637 default:
1638 break;
1639 }
1640 }
1641
1642 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1643 {
1644 unsigned long iv = check_interval * HZ;
1645
1646 if (mca_cfg.ignore_ce || !iv)
1647 return;
1648
1649 per_cpu(mce_next_interval, cpu) = iv;
1650
1651 t->expires = round_jiffies(jiffies + iv);
1652 add_timer_on(t, cpu);
1653 }
1654
1655 static void __mcheck_cpu_init_timer(void)
1656 {
1657 struct timer_list *t = this_cpu_ptr(&mce_timer);
1658 unsigned int cpu = smp_processor_id();
1659
1660 setup_timer(t, mce_timer_fn, cpu);
1661 mce_start_timer(cpu, t);
1662 }
1663
1664 /* Handle unconfigured int18 (should never happen) */
1665 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1666 {
1667 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1668 smp_processor_id());
1669 }
1670
1671 /* Call the installed machine check handler for this CPU setup. */
1672 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1673 unexpected_machine_check;
1674
1675 /*
1676 * Called for each booted CPU to set up machine checks.
1677 * Must be called with preempt off:
1678 */
1679 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1680 {
1681 if (mca_cfg.disabled)
1682 return;
1683
1684 if (__mcheck_cpu_ancient_init(c))
1685 return;
1686
1687 if (!mce_available(c))
1688 return;
1689
1690 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1691 mca_cfg.disabled = true;
1692 return;
1693 }
1694
1695 machine_check_vector = do_machine_check;
1696
1697 __mcheck_cpu_init_generic();
1698 __mcheck_cpu_init_vendor(c);
1699 __mcheck_cpu_init_timer();
1700 INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
1701 init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
1702 }
1703
1704 /*
1705 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1706 */
1707
1708 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1709 static int mce_chrdev_open_count; /* #times opened */
1710 static int mce_chrdev_open_exclu; /* already open exclusive? */
1711
1712 static int mce_chrdev_open(struct inode *inode, struct file *file)
1713 {
1714 spin_lock(&mce_chrdev_state_lock);
1715
1716 if (mce_chrdev_open_exclu ||
1717 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1718 spin_unlock(&mce_chrdev_state_lock);
1719
1720 return -EBUSY;
1721 }
1722
1723 if (file->f_flags & O_EXCL)
1724 mce_chrdev_open_exclu = 1;
1725 mce_chrdev_open_count++;
1726
1727 spin_unlock(&mce_chrdev_state_lock);
1728
1729 return nonseekable_open(inode, file);
1730 }
1731
1732 static int mce_chrdev_release(struct inode *inode, struct file *file)
1733 {
1734 spin_lock(&mce_chrdev_state_lock);
1735
1736 mce_chrdev_open_count--;
1737 mce_chrdev_open_exclu = 0;
1738
1739 spin_unlock(&mce_chrdev_state_lock);
1740
1741 return 0;
1742 }
1743
1744 static void collect_tscs(void *data)
1745 {
1746 unsigned long *cpu_tsc = (unsigned long *)data;
1747
1748 rdtscll(cpu_tsc[smp_processor_id()]);
1749 }
1750
1751 static int mce_apei_read_done;
1752
1753 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1754 static int __mce_read_apei(char __user **ubuf, size_t usize)
1755 {
1756 int rc;
1757 u64 record_id;
1758 struct mce m;
1759
1760 if (usize < sizeof(struct mce))
1761 return -EINVAL;
1762
1763 rc = apei_read_mce(&m, &record_id);
1764 /* Error or no more MCE record */
1765 if (rc <= 0) {
1766 mce_apei_read_done = 1;
1767 /*
1768 * When ERST is disabled, mce_chrdev_read() should return
1769 * "no record" instead of "no device."
1770 */
1771 if (rc == -ENODEV)
1772 return 0;
1773 return rc;
1774 }
1775 rc = -EFAULT;
1776 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1777 return rc;
1778 /*
1779 * In fact, we should have cleared the record after it has
1780 * been flushed to disk or sent to the network by
1781 * /sbin/mcelog, but we have no interface to support that now,
1782 * so just clear it to avoid duplication.
1783 */
1784 rc = apei_clear_mce(record_id);
1785 if (rc) {
1786 mce_apei_read_done = 1;
1787 return rc;
1788 }
1789 *ubuf += sizeof(struct mce);
1790
1791 return 0;
1792 }
1793
1794 static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1795 size_t usize, loff_t *off)
1796 {
1797 char __user *buf = ubuf;
1798 unsigned long *cpu_tsc;
1799 unsigned prev, next;
1800 int i, err;
1801
1802 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1803 if (!cpu_tsc)
1804 return -ENOMEM;
1805
1806 mutex_lock(&mce_chrdev_read_mutex);
1807
1808 if (!mce_apei_read_done) {
1809 err = __mce_read_apei(&buf, usize);
1810 if (err || buf != ubuf)
1811 goto out;
1812 }
1813
1814 next = rcu_dereference_check_mce(mcelog.next);
1815
1816 /* Only supports full reads right now */
1817 err = -EINVAL;
1818 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1819 goto out;
1820
1821 err = 0;
1822 prev = 0;
1823 do {
1824 for (i = prev; i < next; i++) {
1825 unsigned long start = jiffies;
1826 struct mce *m = &mcelog.entry[i];
1827
1828 while (!m->finished) {
1829 if (time_after_eq(jiffies, start + 2)) {
1830 memset(m, 0, sizeof(*m));
1831 goto timeout;
1832 }
1833 cpu_relax();
1834 }
1835 smp_rmb();
1836 err |= copy_to_user(buf, m, sizeof(*m));
1837 buf += sizeof(*m);
1838 timeout:
1839 ;
1840 }
1841
1842 memset(mcelog.entry + prev, 0,
1843 (next - prev) * sizeof(struct mce));
1844 prev = next;
1845 next = cmpxchg(&mcelog.next, prev, 0);
1846 } while (next != prev);
1847
1848 synchronize_sched();
1849
1850 /*
1851 * Collect entries that were still getting written before the
1852 * synchronize.
1853 */
1854 on_each_cpu(collect_tscs, cpu_tsc, 1);
1855
1856 for (i = next; i < MCE_LOG_LEN; i++) {
1857 struct mce *m = &mcelog.entry[i];
1858
1859 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1860 err |= copy_to_user(buf, m, sizeof(*m));
1861 smp_rmb();
1862 buf += sizeof(*m);
1863 memset(m, 0, sizeof(*m));
1864 }
1865 }
1866
1867 if (err)
1868 err = -EFAULT;
1869
1870 out:
1871 mutex_unlock(&mce_chrdev_read_mutex);
1872 kfree(cpu_tsc);
1873
1874 return err ? err : buf - ubuf;
1875 }
1876
1877 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1878 {
1879 poll_wait(file, &mce_chrdev_wait, wait);
1880 if (rcu_access_index(mcelog.next))
1881 return POLLIN | POLLRDNORM;
1882 if (!mce_apei_read_done && apei_check_mce())
1883 return POLLIN | POLLRDNORM;
1884 return 0;
1885 }
1886
1887 static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1888 unsigned long arg)
1889 {
1890 int __user *p = (int __user *)arg;
1891
1892 if (!capable(CAP_SYS_ADMIN))
1893 return -EPERM;
1894
1895 switch (cmd) {
1896 case MCE_GET_RECORD_LEN:
1897 return put_user(sizeof(struct mce), p);
1898 case MCE_GET_LOG_LEN:
1899 return put_user(MCE_LOG_LEN, p);
1900 case MCE_GETCLEAR_FLAGS: {
1901 unsigned flags;
1902
1903 do {
1904 flags = mcelog.flags;
1905 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
1906
1907 return put_user(flags, p);
1908 }
1909 default:
1910 return -ENOTTY;
1911 }
1912 }
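/*
 * Rough user-space sketch of the interface above (illustrative only,
 * error handling omitted): since mce_chrdev_read() only supports
 * reading the whole buffer at once, consumers are expected to size
 * their buffer from the ioctls first, roughly as mcelog(8) does:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int reclen, loglen;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	char *buf = malloc(reclen * loglen);
 *	ssize_t n = read(fd, buf, reclen * loglen);
 *
 * n / reclen is then the number of struct mce records returned.
 */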
1913
1914 static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1915 size_t usize, loff_t *off);
1916
1917 void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1918 const char __user *ubuf,
1919 size_t usize, loff_t *off))
1920 {
1921 mce_write = fn;
1922 }
1923 EXPORT_SYMBOL_GPL(register_mce_write_callback);
1924
1925 ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1926 size_t usize, loff_t *off)
1927 {
1928 if (mce_write)
1929 return mce_write(filp, ubuf, usize, off);
1930 else
1931 return -EINVAL;
1932 }
1933
1934 static const struct file_operations mce_chrdev_ops = {
1935 .open = mce_chrdev_open,
1936 .release = mce_chrdev_release,
1937 .read = mce_chrdev_read,
1938 .write = mce_chrdev_write,
1939 .poll = mce_chrdev_poll,
1940 .unlocked_ioctl = mce_chrdev_ioctl,
1941 .llseek = no_llseek,
1942 };
1943
1944 static struct miscdevice mce_chrdev_device = {
1945 MISC_MCELOG_MINOR,
1946 "mcelog",
1947 &mce_chrdev_ops,
1948 };
1949
1950 static void __mce_disable_bank(void *arg)
1951 {
1952 int bank = *((int *)arg);
1953 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1954 cmci_disable_bank(bank);
1955 }
1956
1957 void mce_disable_bank(int bank)
1958 {
1959 if (bank >= mca_cfg.banks) {
1960 pr_warn(FW_BUG
1961 "Ignoring request to disable invalid MCA bank %d.\n",
1962 bank);
1963 return;
1964 }
1965 set_bit(bank, mce_banks_ce_disabled);
1966 on_each_cpu(__mce_disable_bank, &bank, 1);
1967 }
1968
1969 /*
1970 * mce=off Disables machine check
1971 * mce=no_cmci Disables CMCI
1972 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1973 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1974 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1975 * monarchtimeout is how long to wait for other CPUs on machine
1976 * check, or 0 to not wait
1977 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1978 * mce=nobootlog Don't log MCEs from before booting.
1979 * mce=bios_cmci_threshold Don't program the CMCI threshold
1980 */
1981 static int __init mcheck_enable(char *str)
1982 {
1983 struct mca_config *cfg = &mca_cfg;
1984
1985 if (*str == 0) {
1986 enable_p5_mce();
1987 return 1;
1988 }
1989 if (*str == '=')
1990 str++;
1991 if (!strcmp(str, "off"))
1992 cfg->disabled = true;
1993 else if (!strcmp(str, "no_cmci"))
1994 cfg->cmci_disabled = true;
1995 else if (!strcmp(str, "dont_log_ce"))
1996 cfg->dont_log_ce = true;
1997 else if (!strcmp(str, "ignore_ce"))
1998 cfg->ignore_ce = true;
1999 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2000 cfg->bootlog = (str[0] == 'b');
2001 else if (!strcmp(str, "bios_cmci_threshold"))
2002 cfg->bios_cmci_threshold = true;
2003 else if (isdigit(str[0])) {
2004 get_option(&str, &(cfg->tolerant));
2005 if (*str == ',') {
2006 ++str;
2007 get_option(&str, &(cfg->monarch_timeout));
2008 }
2009 } else {
2010 pr_info("mce argument %s ignored. Please use /sys\n", str);
2011 return 0;
2012 }
2013 return 1;
2014 }
2015 __setup("mce", mcheck_enable);
2016
2017 int __init mcheck_init(void)
2018 {
2019 mcheck_intel_therm_init();
2020
2021 return 0;
2022 }
2023
2024 /*
2025 * mce_syscore: PM support
2026 */
2027
2028 /*
2029 * Disable machine checks on suspend and shutdown. We can't really handle
2030 * them later.
2031 */
2032 static int mce_disable_error_reporting(void)
2033 {
2034 int i;
2035
2036 for (i = 0; i < mca_cfg.banks; i++) {
2037 struct mce_bank *b = &mce_banks[i];
2038
2039 if (b->init)
2040 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2041 }
2042 return 0;
2043 }
2044
2045 static int mce_syscore_suspend(void)
2046 {
2047 return mce_disable_error_reporting();
2048 }
2049
2050 static void mce_syscore_shutdown(void)
2051 {
2052 mce_disable_error_reporting();
2053 }
2054
2055 /*
2056 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2057 * Only one CPU is active at this time, the others get re-added later using
2058 * CPU hotplug:
2059 */
2060 static void mce_syscore_resume(void)
2061 {
2062 __mcheck_cpu_init_generic();
2063 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2064 }
2065
2066 static struct syscore_ops mce_syscore_ops = {
2067 .suspend = mce_syscore_suspend,
2068 .shutdown = mce_syscore_shutdown,
2069 .resume = mce_syscore_resume,
2070 };
2071
2072 /*
2073 * mce_device: Sysfs support
2074 */
2075
2076 static void mce_cpu_restart(void *data)
2077 {
2078 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2079 return;
2080 __mcheck_cpu_init_generic();
2081 __mcheck_cpu_init_timer();
2082 }
2083
2084 /* Reinit MCEs after user configuration changes */
2085 static void mce_restart(void)
2086 {
2087 mce_timer_delete_all();
2088 on_each_cpu(mce_cpu_restart, NULL, 1);
2089 }
2090
2091 /* Toggle features for corrected errors */
2092 static void mce_disable_cmci(void *data)
2093 {
2094 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2095 return;
2096 cmci_clear();
2097 }
2098
2099 static void mce_enable_ce(void *all)
2100 {
2101 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2102 return;
2103 cmci_reenable();
2104 cmci_recheck();
2105 if (all)
2106 __mcheck_cpu_init_timer();
2107 }
2108
2109 static struct bus_type mce_subsys = {
2110 .name = "machinecheck",
2111 .dev_name = "machinecheck",
2112 };
2113
2114 DEFINE_PER_CPU(struct device *, mce_device);
2115
2116 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2117
2118 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2119 {
2120 return container_of(attr, struct mce_bank, attr);
2121 }
2122
2123 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2124 char *buf)
2125 {
2126 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2127 }
2128
2129 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2130 const char *buf, size_t size)
2131 {
2132 u64 new;
2133
2134 if (kstrtou64(buf, 0, &new) < 0)
2135 return -EINVAL;
2136
2137 attr_to_bank(attr)->ctl = new;
2138 mce_restart();
2139
2140 return size;
2141 }
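/*
 * show_bank()/set_bank() back the per-bank "bankN" sysfs files set up in
 * mce_init_banks() further down. An illustrative session (bank number and
 * value are arbitrary examples; the path follows from the "machinecheck"
 * bus and device names used in this file):
 *
 *   # cat /sys/devices/system/machinecheck/machinecheck0/bank2
 *   ffffffffffffffff
 *   # echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 *
 * A write updates the shared bank control value and calls mce_restart(),
 * so all CPUs pick up the new setting (the bank structures are shared
 * between CPUs, as noted at mce_device_create() below).
 */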
2142
2143 static ssize_t
2144 show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2145 {
2146 strcpy(buf, mce_helper);
2147 strcat(buf, "\n");
2148 return strlen(mce_helper) + 1;
2149 }
2150
2151 static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2152 const char *buf, size_t siz)
2153 {
2154 char *p;
2155
2156 strncpy(mce_helper, buf, sizeof(mce_helper));
2157 mce_helper[sizeof(mce_helper)-1] = 0;
2158 p = strchr(mce_helper, '\n');
2159
2160 if (p)
2161 *p = 0;
2162
2163 return strlen(mce_helper) + !!p;
2164 }
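/*
 * The trigger file configures the user mode helper that is run when an
 * event is logged. mce_helper is a single global buffer, so the value is
 * shared even though the file appears under every CPU's directory. A
 * hypothetical setup (the helper path is only an example):
 *
 *   # echo /usr/local/sbin/mce-notify > \
 *         /sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * Writing an empty string clears the trigger again.
 */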
2165
2166 static ssize_t set_ignore_ce(struct device *s,
2167 struct device_attribute *attr,
2168 const char *buf, size_t size)
2169 {
2170 u64 new;
2171
2172 if (kstrtou64(buf, 0, &new) < 0)
2173 return -EINVAL;
2174
2175 if (mca_cfg.ignore_ce ^ !!new) {
2176 if (new) {
2177 /* disable ce features */
2178 mce_timer_delete_all();
2179 on_each_cpu(mce_disable_cmci, NULL, 1);
2180 mca_cfg.ignore_ce = true;
2181 } else {
2182 /* enable ce features */
2183 mca_cfg.ignore_ce = false;
2184 on_each_cpu(mce_enable_ce, (void *)1, 1);
2185 }
2186 }
2187 return size;
2188 }
2189
2190 static ssize_t set_cmci_disabled(struct device *s,
2191 struct device_attribute *attr,
2192 const char *buf, size_t size)
2193 {
2194 u64 new;
2195
2196 if (kstrtou64(buf, 0, &new) < 0)
2197 return -EINVAL;
2198
2199 if (mca_cfg.cmci_disabled ^ !!new) {
2200 if (new) {
2201 /* disable cmci */
2202 on_each_cpu(mce_disable_cmci, NULL, 1);
2203 mca_cfg.cmci_disabled = true;
2204 } else {
2205 /* enable cmci */
2206 mca_cfg.cmci_disabled = false;
2207 on_each_cpu(mce_enable_ce, NULL, 1);
2208 }
2209 }
2210 return size;
2211 }
2212
2213 static ssize_t store_int_with_restart(struct device *s,
2214 struct device_attribute *attr,
2215 const char *buf, size_t size)
2216 {
2217 ssize_t ret = device_store_int(s, attr, buf, size);
2218 mce_restart();
2219 return ret;
2220 }
2221
2222 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2223 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2224 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2225 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2226
2227 static struct dev_ext_attribute dev_attr_check_interval = {
2228 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2229 &check_interval
2230 };
2231
2232 static struct dev_ext_attribute dev_attr_ignore_ce = {
2233 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2234 &mca_cfg.ignore_ce
2235 };
2236
2237 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2238 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2239 &mca_cfg.cmci_disabled
2240 };
2241
2242 static struct device_attribute *mce_device_attrs[] = {
2243 &dev_attr_tolerant.attr,
2244 &dev_attr_check_interval.attr,
2245 &dev_attr_trigger,
2246 &dev_attr_monarch_timeout.attr,
2247 &dev_attr_dont_log_ce.attr,
2248 &dev_attr_ignore_ce.attr,
2249 &dev_attr_cmci_disabled.attr,
2250 NULL
2251 };
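/*
 * Together with the per-bank files, the attributes above appear under
 * /sys/devices/system/machinecheck/machinecheckN/ for each online CPU N.
 * A few illustrative uses (the values are arbitrary examples):
 *
 *   # echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *       stop CE polling and CMCI on all CPUs (see set_ignore_ce() above)
 *   # echo 0 > /sys/devices/system/machinecheck/machinecheck0/cmci_disabled
 *       re-enable CMCI everywhere via mce_enable_ce()
 *   # echo 300 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *       store a new polling interval and restart the per-CPU timers
 *       through store_int_with_restart()
 *
 * These settings are global state, even though the files are replicated
 * in every CPU's directory.
 */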
2252
2253 static cpumask_var_t mce_device_initialized;
2254
2255 static void mce_device_release(struct device *dev)
2256 {
2257 kfree(dev);
2258 }
2259
2260 /* Per-CPU device init. Note that all CPUs still share the same bank control values: */
2261 static int mce_device_create(unsigned int cpu)
2262 {
2263 struct device *dev;
2264 int err;
2265 int i, j;
2266
2267 if (!mce_available(&boot_cpu_data))
2268 return -EIO;
2269
2270 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2271 if (!dev)
2272 return -ENOMEM;
2273 dev->id = cpu;
2274 dev->bus = &mce_subsys;
2275 dev->release = &mce_device_release;
2276
2277 err = device_register(dev);
2278 if (err) {
2279 put_device(dev);
2280 return err;
2281 }
2282
2283 for (i = 0; mce_device_attrs[i]; i++) {
2284 err = device_create_file(dev, mce_device_attrs[i]);
2285 if (err)
2286 goto error;
2287 }
2288 for (j = 0; j < mca_cfg.banks; j++) {
2289 err = device_create_file(dev, &mce_banks[j].attr);
2290 if (err)
2291 goto error2;
2292 }
2293 cpumask_set_cpu(cpu, mce_device_initialized);
2294 per_cpu(mce_device, cpu) = dev;
2295
2296 return 0;
2297 error2:
2298 while (--j >= 0)
2299 device_remove_file(dev, &mce_banks[j].attr);
2300 error:
2301 while (--i >= 0)
2302 device_remove_file(dev, mce_device_attrs[i]);
2303
2304 device_unregister(dev);
2305
2306 return err;
2307 }
2308
2309 static void mce_device_remove(unsigned int cpu)
2310 {
2311 struct device *dev = per_cpu(mce_device, cpu);
2312 int i;
2313
2314 if (!cpumask_test_cpu(cpu, mce_device_initialized))
2315 return;
2316
2317 for (i = 0; mce_device_attrs[i]; i++)
2318 device_remove_file(dev, mce_device_attrs[i]);
2319
2320 for (i = 0; i < mca_cfg.banks; i++)
2321 device_remove_file(dev, &mce_banks[i].attr);
2322
2323 device_unregister(dev);
2324 cpumask_clear_cpu(cpu, mce_device_initialized);
2325 per_cpu(mce_device, cpu) = NULL;
2326 }
2327
2328 /* Make sure there are no machine checks on offlined CPUs. */
2329 static void mce_disable_cpu(void *h)
2330 {
2331 unsigned long action = *(unsigned long *)h;
2332 int i;
2333
2334 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2335 return;
2336
2337 if (!(action & CPU_TASKS_FROZEN))
2338 cmci_clear();
2339 for (i = 0; i < mca_cfg.banks; i++) {
2340 struct mce_bank *b = &mce_banks[i];
2341
2342 if (b->init)
2343 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2344 }
2345 }
2346
2347 static void mce_reenable_cpu(void *h)
2348 {
2349 unsigned long action = *(unsigned long *)h;
2350 int i;
2351
2352 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2353 return;
2354
2355 if (!(action & CPU_TASKS_FROZEN))
2356 cmci_reenable();
2357 for (i = 0; i < mca_cfg.banks; i++) {
2358 struct mce_bank *b = &mce_banks[i];
2359
2360 if (b->init)
2361 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
2362 }
2363 }
2364
2365 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
2366 static int
2367 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2368 {
2369 unsigned int cpu = (unsigned long)hcpu;
2370 struct timer_list *t = &per_cpu(mce_timer, cpu);
2371
2372 switch (action & ~CPU_TASKS_FROZEN) {
2373 case CPU_ONLINE:
2374 mce_device_create(cpu);
2375 if (threshold_cpu_callback)
2376 threshold_cpu_callback(action, cpu);
2377 break;
2378 case CPU_DEAD:
2379 if (threshold_cpu_callback)
2380 threshold_cpu_callback(action, cpu);
2381 mce_device_remove(cpu);
2382 mce_intel_hcpu_update(cpu);
2383
2384 /* intentionally ignoring frozen here */
2385 if (!(action & CPU_TASKS_FROZEN))
2386 cmci_rediscover();
2387 break;
2388 case CPU_DOWN_PREPARE:
2389 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2390 del_timer_sync(t);
2391 break;
2392 case CPU_DOWN_FAILED:
2393 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2394 mce_start_timer(cpu, t);
2395 break;
2396 }
2397
2398 return NOTIFY_OK;
2399 }
2400
2401 static struct notifier_block mce_cpu_notifier = {
2402 .notifier_call = mce_cpu_callback,
2403 };
2404
2405 static __init void mce_init_banks(void)
2406 {
2407 int i;
2408
2409 for (i = 0; i < mca_cfg.banks; i++) {
2410 struct mce_bank *b = &mce_banks[i];
2411 struct device_attribute *a = &b->attr;
2412
2413 sysfs_attr_init(&a->attr);
2414 a->attr.name = b->attrname;
2415 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2416
2417 a->attr.mode = 0644;
2418 a->show = show_bank;
2419 a->store = set_bank;
2420 }
2421 }
2422
2423 static __init int mcheck_init_device(void)
2424 {
2425 int err;
2426 int i = 0;
2427
2428 if (!mce_available(&boot_cpu_data)) {
2429 err = -EIO;
2430 goto err_out;
2431 }
2432
2433 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2434 err = -ENOMEM;
2435 goto err_out;
2436 }
2437
2438 mce_init_banks();
2439
2440 err = subsys_system_register(&mce_subsys, NULL);
2441 if (err)
2442 goto err_out_mem;
2443
2444 cpu_notifier_register_begin();
2445 for_each_online_cpu(i) {
2446 err = mce_device_create(i);
2447 if (err) {
2448 /*
2449 * Register the notifier anyway (and do not unregister
2450 * it) so that we don't leave undeleted timers; see the
2451 * notifier callback above.
2452 */
2453 __register_hotcpu_notifier(&mce_cpu_notifier);
2454 cpu_notifier_register_done();
2455 goto err_device_create;
2456 }
2457 }
2458
2459 __register_hotcpu_notifier(&mce_cpu_notifier);
2460 cpu_notifier_register_done();
2461
2462 register_syscore_ops(&mce_syscore_ops);
2463
2464 /* register character device /dev/mcelog */
2465 err = misc_register(&mce_chrdev_device);
2466 if (err)
2467 goto err_register;
2468
2469 return 0;
2470
2471 err_register:
2472 unregister_syscore_ops(&mce_syscore_ops);
2473
2474 err_device_create:
2475 /*
2476 * We didn't keep track of which devices were created above, but
2477 * even if we had, the set of online cpus might have changed.
2478 * Play safe and remove for every possible cpu, since
2479 * mce_device_remove() will do the right thing.
2480 */
2481 for_each_possible_cpu(i)
2482 mce_device_remove(i);
2483
2484 err_out_mem:
2485 free_cpumask_var(mce_device_initialized);
2486
2487 err_out:
2488 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
2489
2490 return err;
2491 }
2492 device_initcall_sync(mcheck_init_device);
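/*
 * The misc device registered by mcheck_init_device() shows up as
 * /dev/mcelog. It is typically drained by a userspace consumer such as
 * the mcelog(8) daemon (an assumption about common deployments, nothing
 * this file requires), which reads the binary struct mce records the
 * character device hands out.
 */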
2493
2494 /*
2495 * Old style boot options parsing. Only for compatibility.
2496 */
2497 static int __init mcheck_disable(char *str)
2498 {
2499 mca_cfg.disabled = true;
2500 return 1;
2501 }
2502 __setup("nomce", mcheck_disable);
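/*
 * Booting with "nomce" has the same effect as "mce=off": both simply set
 * mca_cfg.disabled.
 */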
2503
2504 #ifdef CONFIG_DEBUG_FS
2505 struct dentry *mce_get_debugfs_dir(void)
2506 {
2507 static struct dentry *dmce;
2508
2509 if (!dmce)
2510 dmce = debugfs_create_dir("mce", NULL);
2511
2512 return dmce;
2513 }
2514
2515 static void mce_reset(void)
2516 {
2517 cpu_missing = 0;
2518 atomic_set(&mce_fake_panicked, 0);
2519 atomic_set(&mce_executing, 0);
2520 atomic_set(&mce_callin, 0);
2521 atomic_set(&global_nwo, 0);
2522 }
2523
2524 static int fake_panic_get(void *data, u64 *val)
2525 {
2526 *val = fake_panic;
2527 return 0;
2528 }
2529
2530 static int fake_panic_set(void *data, u64 val)
2531 {
2532 mce_reset();
2533 fake_panic = val;
2534 return 0;
2535 }
2536
2537 DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2538 fake_panic_set, "%llu\n");
2539
2540 static int __init mcheck_debugfs_init(void)
2541 {
2542 struct dentry *dmce, *ffake_panic;
2543
2544 dmce = mce_get_debugfs_dir();
2545 if (!dmce)
2546 return -ENOMEM;
2547 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2548 &fake_panic_fops);
2549 if (!ffake_panic)
2550 return -ENOMEM;
2551
2552 return 0;
2553 }
2554 late_initcall(mcheck_debugfs_init);
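/*
 * With debugfs mounted at its conventional location, the file created
 * above lets the panic path be exercised without actually panicking
 * (fake_panic is consulted by mce_panic() earlier in this file). An
 * illustrative session (the file is created 0444, so writing it relies
 * on root's ability to override file modes):
 *
 *   # cat /sys/kernel/debug/mce/fake_panic
 *   0
 *   # echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * Each write also runs mce_reset(), clearing the rendezvous counters
 * before the next test.
 */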
2555 #endif