x86/mce: Reindent __mcheck_cpu_apply_quirks() properly
arch/x86/kernel/cpu/mcheck/mce.c
1 /*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/thread_info.h>
14 #include <linux/capability.h>
15 #include <linux/miscdevice.h>
16 #include <linux/ratelimit.h>
17 #include <linux/kallsyms.h>
18 #include <linux/rcupdate.h>
19 #include <linux/kobject.h>
20 #include <linux/uaccess.h>
21 #include <linux/kdebug.h>
22 #include <linux/kernel.h>
23 #include <linux/percpu.h>
24 #include <linux/string.h>
25 #include <linux/device.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/delay.h>
28 #include <linux/ctype.h>
29 #include <linux/sched.h>
30 #include <linux/sysfs.h>
31 #include <linux/types.h>
32 #include <linux/slab.h>
33 #include <linux/init.h>
34 #include <linux/kmod.h>
35 #include <linux/poll.h>
36 #include <linux/nmi.h>
37 #include <linux/cpu.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/mm.h>
41 #include <linux/debugfs.h>
42 #include <linux/irq_work.h>
43 #include <linux/export.h>
44
45 #include <asm/processor.h>
46 #include <asm/traps.h>
47 #include <asm/mce.h>
48 #include <asm/msr.h>
49
50 #include "mce-internal.h"
51
52 static DEFINE_MUTEX(mce_chrdev_read_mutex);
53
54 #define rcu_dereference_check_mce(p) \
55 rcu_dereference_index_check((p), \
56 rcu_read_lock_sched_held() || \
57 lockdep_is_held(&mce_chrdev_read_mutex))
58
59 #define CREATE_TRACE_POINTS
60 #include <trace/events/mce.h>
61
62 #define SPINUNIT 100 /* 100ns */
63
64 DEFINE_PER_CPU(unsigned, mce_exception_count);
65
66 struct mce_bank *mce_banks __read_mostly;
67
68 struct mca_config mca_cfg __read_mostly = {
69 .bootlog = -1,
70 /*
71 * Tolerant levels:
72 * 0: always panic on uncorrected errors, log corrected errors
73 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
74 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
75 * 3: never panic or SIGBUS, log all errors (for testing only)
76 */
77 .tolerant = 1,
78 .monarch_timeout = -1
79 };
80
81 /* User mode helper program triggered by machine check event */
82 static unsigned long mce_need_notify;
83 static char mce_helper[128];
84 static char *mce_helper_argv[2] = { mce_helper, NULL };
85
86 static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
87
88 static DEFINE_PER_CPU(struct mce, mces_seen);
89 static int cpu_missing;
90
91 /*
92 * MCA banks polled by the period polling timer for corrected events.
93 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
94 */
95 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
96 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
97 };
98
99 /*
100 * MCA banks controlled through firmware first for corrected errors.
101 * This is a global list of banks for which we won't enable CMCI and we
102 * won't poll. Firmware controls these banks and is responsible for
103 * reporting corrected errors through GHES. Uncorrected/recoverable
104 * errors are still notified through a machine check.
105 */
106 mce_banks_t mce_banks_ce_disabled;
107
108 static DEFINE_PER_CPU(struct work_struct, mce_work);
109
110 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
111
112 /*
113 * CPU/chipset specific EDAC code can register a notifier call here to print
114 * MCE errors in a human-readable form.
115 */
116 static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
117
118 /* Do initial initialization of a struct mce */
119 void mce_setup(struct mce *m)
120 {
121 memset(m, 0, sizeof(struct mce));
122 m->cpu = m->extcpu = smp_processor_id();
123 rdtscll(m->tsc);
124 /* We hope get_seconds stays lockless */
125 m->time = get_seconds();
126 m->cpuvendor = boot_cpu_data.x86_vendor;
127 m->cpuid = cpuid_eax(1);
128 m->socketid = cpu_data(m->extcpu).phys_proc_id;
129 m->apicid = cpu_data(m->extcpu).initial_apicid;
130 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
131 }
132
133 DEFINE_PER_CPU(struct mce, injectm);
134 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
135
136 /*
137 * Lockless MCE logging infrastructure.
138 * This avoids deadlocks on printk locks without having to break locks. Also
139 * separate MCEs from kernel messages to avoid bogus bug reports.
140 */
141
142 static struct mce_log mcelog = {
143 .signature = MCE_LOG_SIGNATURE,
144 .len = MCE_LOG_LEN,
145 .recordlen = sizeof(struct mce),
146 };
147
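/*
 * Append one record to the lockless mcelog buffer and run it through the
 * decoder notifier chain. Designed to be callable from machine check
 * context, hence the cmpxchg-based, lock-free buffer handling below.
 */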
148 void mce_log(struct mce *mce)
149 {
150 unsigned next, entry;
151 int ret = 0;
152
153 /* Emit the trace record: */
154 trace_mce_record(mce);
155
156 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
157 if (ret == NOTIFY_STOP)
158 return;
159
160 mce->finished = 0;
161 wmb();
162 for (;;) {
163 entry = rcu_dereference_check_mce(mcelog.next);
164 for (;;) {
165
166 /*
167 * When the buffer fills up discard new entries.
168 * Assume that the earlier errors are the more
169 * interesting ones:
170 */
171 if (entry >= MCE_LOG_LEN) {
172 set_bit(MCE_OVERFLOW,
173 (unsigned long *)&mcelog.flags);
174 return;
175 }
176 /* Old left over entry. Skip: */
177 if (mcelog.entry[entry].finished) {
178 entry++;
179 continue;
180 }
181 break;
182 }
183 smp_rmb();
184 next = entry + 1;
185 if (cmpxchg(&mcelog.next, entry, next) == entry)
186 break;
187 }
188 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
189 wmb();
190 mcelog.entry[entry].finished = 1;
191 wmb();
192
193 mce->finished = 1;
194 set_bit(0, &mce_need_notify);
195 }
196
197 static void drain_mcelog_buffer(void)
198 {
199 unsigned int next, i, prev = 0;
200
201 next = ACCESS_ONCE(mcelog.next);
202
203 do {
204 struct mce *m;
205
206 /* drain what was logged during boot */
207 for (i = prev; i < next; i++) {
208 unsigned long start = jiffies;
209 unsigned retries = 1;
210
211 m = &mcelog.entry[i];
212
213 while (!m->finished) {
214 if (time_after_eq(jiffies, start + 2*retries))
215 retries++;
216
217 cpu_relax();
218
219 if (!m->finished && retries >= 4) {
220 pr_err("skipping error being logged currently!\n");
221 break;
222 }
223 }
224 smp_rmb();
225 atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
226 }
227
228 memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
229 prev = next;
230 next = cmpxchg(&mcelog.next, prev, 0);
231 } while (next != prev);
232 }
233
234
235 void mce_register_decode_chain(struct notifier_block *nb)
236 {
237 atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
238 drain_mcelog_buffer();
239 }
240 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
241
242 void mce_unregister_decode_chain(struct notifier_block *nb)
243 {
244 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
245 }
246 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
247
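/*
 * Dump a single MCE record to the console in the fixed format that
 * external tools such as mcelog parse, then give registered decoders a
 * chance to print a human-readable version.
 */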
248 static void print_mce(struct mce *m)
249 {
250 int ret = 0;
251
252 pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
253 m->extcpu, m->mcgstatus, m->bank, m->status);
254
255 if (m->ip) {
256 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
257 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
258 m->cs, m->ip);
259
260 if (m->cs == __KERNEL_CS)
261 print_symbol("{%s}", m->ip);
262 pr_cont("\n");
263 }
264
265 pr_emerg(HW_ERR "TSC %llx ", m->tsc);
266 if (m->addr)
267 pr_cont("ADDR %llx ", m->addr);
268 if (m->misc)
269 pr_cont("MISC %llx ", m->misc);
270
271 pr_cont("\n");
272 /*
273 * Note this output is parsed by external tools and old fields
274 * should not be changed.
275 */
276 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
277 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
278 cpu_data(m->extcpu).microcode);
279
280 /*
281 * Print out human-readable details about the MCE error,
282 * (if the CPU has an implementation for that)
283 */
284 ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
285 if (ret == NOTIFY_STOP)
286 return;
287
288 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
289 }
290
291 #define PANIC_TIMEOUT 5 /* 5 seconds */
292
293 static atomic_t mce_panicked;
294
295 static int fake_panic;
296 static atomic_t mce_fake_panicked;
297
298 /* Panic in progress. Enable interrupts and wait for final IPI */
299 static void wait_for_panic(void)
300 {
301 long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
302
303 preempt_disable();
304 local_irq_enable();
305 while (timeout-- > 0)
306 udelay(1);
307 if (panic_timeout == 0)
308 panic_timeout = mca_cfg.panic_timeout;
309 panic("Panicking machine check CPU died");
310 }
311
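/*
 * Panic (or only pretend to, when fake_panic is set for testing) after
 * dumping all still-unlogged records, printing the record that triggered
 * the panic last.
 */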
312 static void mce_panic(const char *msg, struct mce *final, char *exp)
313 {
314 int i, apei_err = 0;
315
316 if (!fake_panic) {
317 /*
318 * Make sure only one CPU runs in machine check panic
319 */
320 if (atomic_inc_return(&mce_panicked) > 1)
321 wait_for_panic();
322 barrier();
323
324 bust_spinlocks(1);
325 console_verbose();
326 } else {
327 /* Don't log too much for fake panic */
328 if (atomic_inc_return(&mce_fake_panicked) > 1)
329 return;
330 }
331 /* First print corrected ones that are still unlogged */
332 for (i = 0; i < MCE_LOG_LEN; i++) {
333 struct mce *m = &mcelog.entry[i];
334 if (!(m->status & MCI_STATUS_VAL))
335 continue;
336 if (!(m->status & MCI_STATUS_UC)) {
337 print_mce(m);
338 if (!apei_err)
339 apei_err = apei_write_mce(m);
340 }
341 }
342 /* Now print uncorrected but with the final one last */
343 for (i = 0; i < MCE_LOG_LEN; i++) {
344 struct mce *m = &mcelog.entry[i];
345 if (!(m->status & MCI_STATUS_VAL))
346 continue;
347 if (!(m->status & MCI_STATUS_UC))
348 continue;
349 if (!final || memcmp(m, final, sizeof(struct mce))) {
350 print_mce(m);
351 if (!apei_err)
352 apei_err = apei_write_mce(m);
353 }
354 }
355 if (final) {
356 print_mce(final);
357 if (!apei_err)
358 apei_err = apei_write_mce(final);
359 }
360 if (cpu_missing)
361 pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
362 if (exp)
363 pr_emerg(HW_ERR "Machine check: %s\n", exp);
364 if (!fake_panic) {
365 if (panic_timeout == 0)
366 panic_timeout = mca_cfg.panic_timeout;
367 panic(msg);
368 } else
369 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
370 }
371
372 /* Support code for software error injection */
373
374 static int msr_to_offset(u32 msr)
375 {
376 unsigned bank = __this_cpu_read(injectm.bank);
377
378 if (msr == mca_cfg.rip_msr)
379 return offsetof(struct mce, ip);
380 if (msr == MSR_IA32_MCx_STATUS(bank))
381 return offsetof(struct mce, status);
382 if (msr == MSR_IA32_MCx_ADDR(bank))
383 return offsetof(struct mce, addr);
384 if (msr == MSR_IA32_MCx_MISC(bank))
385 return offsetof(struct mce, misc);
386 if (msr == MSR_IA32_MCG_STATUS)
387 return offsetof(struct mce, mcgstatus);
388 return -1;
389 }
390
391 /* MSR access wrappers used for error injection */
392 static u64 mce_rdmsrl(u32 msr)
393 {
394 u64 v;
395
396 if (__this_cpu_read(injectm.finished)) {
397 int offset = msr_to_offset(msr);
398
399 if (offset < 0)
400 return 0;
401 return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
402 }
403
404 if (rdmsrl_safe(msr, &v)) {
405 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
406 /*
407 * Return zero in case the access faulted. This should
408 * not happen normally but can happen if the CPU does
409 * something weird, or if the code is buggy.
410 */
411 v = 0;
412 }
413
414 return v;
415 }
416
417 static void mce_wrmsrl(u32 msr, u64 v)
418 {
419 if (__this_cpu_read(injectm.finished)) {
420 int offset = msr_to_offset(msr);
421
422 if (offset >= 0)
423 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
424 return;
425 }
426 wrmsrl(msr, v);
427 }
428
429 /*
430 * Collect all global (w.r.t. this processor) status about this machine
431 * check into our "mce" struct so that we can use it later to assess
432 * the severity of the problem as we read per-bank specific details.
433 */
434 static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
435 {
436 mce_setup(m);
437
438 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
439 if (regs) {
440 /*
441 * Get the address of the instruction at the time of
442 * the machine check error.
443 */
444 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
445 m->ip = regs->ip;
446 m->cs = regs->cs;
447
448 /*
449 * When in VM86 mode make the cs look like ring 3
450 * always. This is a lie, but it's better than passing
451 * the additional vm86 bit around everywhere.
452 */
453 if (v8086_mode(regs))
454 m->cs |= 3;
455 }
456 /* Use accurate RIP reporting if available. */
457 if (mca_cfg.rip_msr)
458 m->ip = mce_rdmsrl(mca_cfg.rip_msr);
459 }
460 }
461
462 /*
463 * Simple lockless ring to communicate PFNs from the exception handler with the
464 * process context work function. This is vastly simplified because there's
465 * only a single reader and a single writer.
466 */
467 #define MCE_RING_SIZE 16 /* we use one entry less */
468
469 struct mce_ring {
470 unsigned short start;
471 unsigned short end;
472 unsigned long ring[MCE_RING_SIZE];
473 };
474 static DEFINE_PER_CPU(struct mce_ring, mce_ring);
475
476 /* Runs with CPU affinity in workqueue */
477 static int mce_ring_empty(void)
478 {
479 struct mce_ring *r = this_cpu_ptr(&mce_ring);
480
481 return r->start == r->end;
482 }
483
484 static int mce_ring_get(unsigned long *pfn)
485 {
486 struct mce_ring *r;
487 int ret = 0;
488
489 *pfn = 0;
490 get_cpu();
491 r = this_cpu_ptr(&mce_ring);
492 if (r->start == r->end)
493 goto out;
494 *pfn = r->ring[r->start];
495 r->start = (r->start + 1) % MCE_RING_SIZE;
496 ret = 1;
497 out:
498 put_cpu();
499 return ret;
500 }
501
502 /* Always runs in MCE context with preempt off */
503 static int mce_ring_add(unsigned long pfn)
504 {
505 struct mce_ring *r = this_cpu_ptr(&mce_ring);
506 unsigned next;
507
508 next = (r->end + 1) % MCE_RING_SIZE;
509 if (next == r->start)
510 return -1;
511 r->ring[r->end] = pfn;
512 wmb();
513 r->end = next;
514 return 0;
515 }
516
517 int mce_available(struct cpuinfo_x86 *c)
518 {
519 if (mca_cfg.disabled)
520 return 0;
521 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
522 }
523
524 static void mce_schedule_work(void)
525 {
526 if (!mce_ring_empty())
527 schedule_work(this_cpu_ptr(&mce_work));
528 }
529
530 static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
531
532 static void mce_irq_work_cb(struct irq_work *entry)
533 {
534 mce_notify_irq();
535 mce_schedule_work();
536 }
537
538 static void mce_report_event(struct pt_regs *regs)
539 {
540 if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
541 mce_notify_irq();
542 /*
543 * Triggering the work queue here is just an insurance
544 * policy in case the syscall exit notify handler
545 * doesn't run soon enough or ends up running on the
546 * wrong CPU (can happen when audit sleeps)
547 */
548 mce_schedule_work();
549 return;
550 }
551
552 irq_work_queue(this_cpu_ptr(&mce_irq_work));
553 }
554
555 /*
556 * Read ADDR and MISC registers.
557 */
558 static void mce_read_aux(struct mce *m, int i)
559 {
560 if (m->status & MCI_STATUS_MISCV)
561 m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
562 if (m->status & MCI_STATUS_ADDRV) {
563 m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
564
565 /*
566 * Mask the reported address by the reported granularity.
567 */
568 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
569 u8 shift = MCI_MISC_ADDR_LSB(m->misc);
570 m->addr >>= shift;
571 m->addr <<= shift;
572 }
573 }
574 }
575
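/*
 * Decide from MCi_STATUS whether this event describes a memory error.
 * The decoding is vendor specific; see the Intel compound error code
 * check below.
 */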
576 static bool memory_error(struct mce *m)
577 {
578 struct cpuinfo_x86 *c = &boot_cpu_data;
579
580 if (c->x86_vendor == X86_VENDOR_AMD) {
581 /*
582 * coming soon
583 */
584 return false;
585 } else if (c->x86_vendor == X86_VENDOR_INTEL) {
586 /*
587 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
588 *
589 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
590 * indicating a memory error. Bit 8 is used for indicating a
591 * cache hierarchy error. The combination of bit 2 and bit 3
592 * is used for indicating a `generic' cache hierarchy error.
593 * But we can't just blindly check the above bits, because if
594 * bit 11 is set, then it is a bus/interconnect error - and
595 * either way the above bits just give more detail on which
596 * bus/interconnect error happened. Note that bit 12 can be
597 * ignored, as it's the "filter" bit.
598 */
599 return (m->status & 0xef80) == BIT(7) ||
600 (m->status & 0xef00) == BIT(8) ||
601 (m->status & 0xeffc) == 0xc;
602 }
603
604 return false;
605 }
606
607 DEFINE_PER_CPU(unsigned, mce_poll_count);
608
609 /*
610 * Poll for corrected events or events that happened before reset.
611 * Those are just logged through /dev/mcelog.
612 *
613 * This is executed in standard interrupt context.
614 *
615 * Note: the spec recommends panicking for fatal unsignalled
616 * errors here. However this would be quite problematic --
617 * we would need to reimplement the Monarch handling and
618 * it would mess up the exclusion between the exception handler
619 * and the poll handler -- so we skip this for now.
620 * These cases should not happen anyway, or only when the CPU
621 * is already totally confused. In that case it's likely it will
622 * not fully execute the machine check handler either.
623 */
624 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
625 {
626 bool error_logged = false;
627 struct mce m;
628 int severity;
629 int i;
630
631 this_cpu_inc(mce_poll_count);
632
633 mce_gather_info(&m, NULL);
634
635 for (i = 0; i < mca_cfg.banks; i++) {
636 if (!mce_banks[i].ctl || !test_bit(i, *b))
637 continue;
638
639 m.misc = 0;
640 m.addr = 0;
641 m.bank = i;
642 m.tsc = 0;
643
644 barrier();
645 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
646 if (!(m.status & MCI_STATUS_VAL))
647 continue;
648
649
650 /*
651 * Uncorrected or signalled events are handled by the exception
652 * handler when it is enabled, so don't process those here.
653 *
654 * TBD do the same check for MCI_STATUS_EN here?
655 */
656 if (!(flags & MCP_UC) &&
657 (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
658 continue;
659
660 mce_read_aux(&m, i);
661
662 if (!(flags & MCP_TIMESTAMP))
663 m.tsc = 0;
664
665 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
666
667 /*
668 * In the cases where we don't have a valid address after all,
669 * do not add it into the ring buffer.
670 */
671 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
672 if (m.status & MCI_STATUS_ADDRV) {
673 mce_ring_add(m.addr >> PAGE_SHIFT);
674 mce_schedule_work();
675 }
676 }
677
678 /*
679 * Don't get the IP here because it's unlikely to
680 * have anything to do with the actual error location.
681 */
682 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) {
683 error_logged = true;
684 mce_log(&m);
685 }
686
687 /*
688 * Clear state for this bank.
689 */
690 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
691 }
692
693 /*
694 * Don't clear MCG_STATUS here because it's only defined for
695 * exceptions.
696 */
697
698 sync_core();
699
700 return error_logged;
701 }
702 EXPORT_SYMBOL_GPL(machine_check_poll);
703
704 /*
705 * Do a quick check if any of the events requires a panic.
706 * This decides if we keep the events around or clear them.
707 */
708 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
709 struct pt_regs *regs)
710 {
711 int i, ret = 0;
712
713 for (i = 0; i < mca_cfg.banks; i++) {
714 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
715 if (m->status & MCI_STATUS_VAL) {
716 __set_bit(i, validp);
717 if (quirk_no_way_out)
718 quirk_no_way_out(i, m, regs);
719 }
720 if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
721 MCE_PANIC_SEVERITY)
722 ret = 1;
723 }
724 return ret;
725 }
726
727 /*
728 * Variable to establish order between CPUs while scanning.
729 * Each CPU spins initially until executing is equal its number.
730 */
731 static atomic_t mce_executing;
732
733 /*
734 * Defines order of CPUs on entry. First CPU becomes Monarch.
735 */
736 static atomic_t mce_callin;
737
738 /*
739 * Check if a timeout waiting for other CPUs happened.
740 */
741 static int mce_timed_out(u64 *t, const char *msg)
742 {
743 /*
744 * The others already did panic for some reason.
745 * Bail out like in a timeout.
746 * rmb() to tell the compiler that system_state
747 * might have been modified by someone else.
748 */
749 rmb();
750 if (atomic_read(&mce_panicked))
751 wait_for_panic();
752 if (!mca_cfg.monarch_timeout)
753 goto out;
754 if ((s64)*t < SPINUNIT) {
755 if (mca_cfg.tolerant <= 1)
756 mce_panic(msg, NULL, NULL);
757 cpu_missing = 1;
758 return 1;
759 }
760 *t -= SPINUNIT;
761 out:
762 touch_nmi_watchdog();
763 return 0;
764 }
765
766 /*
767 * The Monarch's reign. The Monarch is the CPU who entered
768 * the machine check handler first. It waits for the others to
769 * raise the exception too and then grades them. When any
770 * error is fatal, panic. Only then let the others continue.
771 *
772 * The other CPUs entering the MCE handler will be controlled by the
773 * Monarch. They are called Subjects.
774 *
775 * This way we prevent any potential data corruption in an unrecoverable case
776 * and also make sure that all CPUs' errors are always examined.
777 *
778 * Also this detects the case of a machine check event coming from outer
779 * space (not detected by any CPU). In this case some external agent wants
780 * us to shut down, so panic too.
781 *
782 * The other CPUs might still decide to panic if the handler happens
783 * in an unrecoverable place, but in this case the system is in a semi-stable
784 * state and won't corrupt anything by itself. It's ok to let the others
785 * continue for a bit first.
786 *
787 * All the spin loops have timeouts; when a timeout happens a CPU
788 * typically elects itself to be Monarch.
789 */
790 static void mce_reign(void)
791 {
792 int cpu;
793 struct mce *m = NULL;
794 int global_worst = 0;
795 char *msg = NULL;
796 char *nmsg = NULL;
797
798 /*
799 * This CPU is the Monarch and the other CPUs have run
800 * through their handlers.
801 * Grade the severity of the errors of all the CPUs.
802 */
803 for_each_possible_cpu(cpu) {
804 int severity = mce_severity(&per_cpu(mces_seen, cpu),
805 mca_cfg.tolerant,
806 &nmsg, true);
807 if (severity > global_worst) {
808 msg = nmsg;
809 global_worst = severity;
810 m = &per_cpu(mces_seen, cpu);
811 }
812 }
813
814 /*
815 * Cannot recover? Panic here then.
816 * This dumps all the mces in the log buffer and stops the
817 * other CPUs.
818 */
819 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
820 mce_panic("Fatal machine check", m, msg);
821
822 /*
823 * For UC somewhere we let the CPU who detects it handle it.
824 * Also must let continue the others, otherwise the handling
825 * CPU could deadlock on a lock.
826 */
827
828 /*
829 * No machine check event found. Must be some external
830 * source or one CPU is hung. Panic.
831 */
832 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
833 mce_panic("Fatal machine check from unknown source", NULL, NULL);
834
835 /*
836 * Now clear all the mces_seen so that they don't reappear on
837 * the next mce.
838 */
839 for_each_possible_cpu(cpu)
840 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
841 }
842
843 static atomic_t global_nwo;
844
845 /*
846 * Start of Monarch synchronization. This waits until all CPUs have
847 * entered the exception handler and then determines if any of them
848 * saw a fatal event that requires panic. Then it executes them
849 * in the entry order.
850 * TBD double check parallel CPU hotunplug
851 */
852 static int mce_start(int *no_way_out)
853 {
854 int order;
855 int cpus = num_online_cpus();
856 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
857
858 if (!timeout)
859 return -1;
860
861 atomic_add(*no_way_out, &global_nwo);
862 /*
863 * global_nwo should be updated before mce_callin
864 */
865 smp_wmb();
866 order = atomic_inc_return(&mce_callin);
867
868 /*
869 * Wait for everyone.
870 */
871 while (atomic_read(&mce_callin) != cpus) {
872 if (mce_timed_out(&timeout,
873 "Timeout: Not all CPUs entered broadcast exception handler")) {
874 atomic_set(&global_nwo, 0);
875 return -1;
876 }
877 ndelay(SPINUNIT);
878 }
879
880 /*
881 * mce_callin should be read before global_nwo
882 */
883 smp_rmb();
884
885 if (order == 1) {
886 /*
887 * Monarch: Starts executing now, the others wait.
888 */
889 atomic_set(&mce_executing, 1);
890 } else {
891 /*
892 * Subject: Now start the scanning loop one by one in
893 * the original callin order.
894 * This way when there are any shared banks it will be
895 * only seen by one CPU before cleared, avoiding duplicates.
896 */
897 while (atomic_read(&mce_executing) < order) {
898 if (mce_timed_out(&timeout,
899 "Timeout: Subject CPUs unable to finish machine check processing")) {
900 atomic_set(&global_nwo, 0);
901 return -1;
902 }
903 ndelay(SPINUNIT);
904 }
905 }
906
907 /*
908 * Cache the global no_way_out state.
909 */
910 *no_way_out = atomic_read(&global_nwo);
911
912 return order;
913 }
914
915 /*
916 * Synchronize between CPUs after main scanning loop.
917 * This invokes the bulk of the Monarch processing.
918 */
919 static int mce_end(int order)
920 {
921 int ret = -1;
922 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
923
924 if (!timeout)
925 goto reset;
926 if (order < 0)
927 goto reset;
928
929 /*
930 * Allow others to run.
931 */
932 atomic_inc(&mce_executing);
933
934 if (order == 1) {
935 /* CHECKME: Can this race with a parallel hotplug? */
936 int cpus = num_online_cpus();
937
938 /*
939 * Monarch: Wait for everyone to go through their scanning
940 * loops.
941 */
942 while (atomic_read(&mce_executing) <= cpus) {
943 if (mce_timed_out(&timeout,
944 "Timeout: Monarch CPU unable to finish machine check processing"))
945 goto reset;
946 ndelay(SPINUNIT);
947 }
948
949 mce_reign();
950 barrier();
951 ret = 0;
952 } else {
953 /*
954 * Subject: Wait for Monarch to finish.
955 */
956 while (atomic_read(&mce_executing) != 0) {
957 if (mce_timed_out(&timeout,
958 "Timeout: Monarch CPU did not finish machine check processing"))
959 goto reset;
960 ndelay(SPINUNIT);
961 }
962
963 /*
964 * Don't reset anything. That's done by the Monarch.
965 */
966 return 0;
967 }
968
969 /*
970 * Reset all global state.
971 */
972 reset:
973 atomic_set(&global_nwo, 0);
974 atomic_set(&mce_callin, 0);
975 barrier();
976
977 /*
978 * Let others run again.
979 */
980 atomic_set(&mce_executing, 0);
981 return ret;
982 }
983
984 /*
985 * Check if the address reported by the CPU is in a format we can parse.
986 * It would be possible to add code for most other cases, but all would
987 * be somewhat complicated (e.g. segment offset would require an instruction
988 * parser). So only support physical addresses up to page granularity for now.
989 */
990 static int mce_usable_address(struct mce *m)
991 {
992 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
993 return 0;
994 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
995 return 0;
996 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
997 return 0;
998 return 1;
999 }
1000
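/* Clear the status MSRs of all banks marked in @toclear. */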
1001 static void mce_clear_state(unsigned long *toclear)
1002 {
1003 int i;
1004
1005 for (i = 0; i < mca_cfg.banks; i++) {
1006 if (test_bit(i, toclear))
1007 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1008 }
1009 }
1010
1011 /*
1012 * The actual machine check handler. This only handles real
1013 * exceptions when something got corrupted coming in through int 18.
1014 *
1015 * This is executed in NMI context not subject to normal locking rules. This
1016 * implies that most kernel services cannot be safely used. Don't even
1017 * think about putting a printk in there!
1018 *
1019 * On Intel systems this is entered on all CPUs in parallel through
1020 * MCE broadcast. However some CPUs might be broken beyond repair,
1021 * so always be careful when synchronizing with others.
1022 */
1023 void do_machine_check(struct pt_regs *regs, long error_code)
1024 {
1025 struct mca_config *cfg = &mca_cfg;
1026 struct mce m, *final;
1027 enum ctx_state prev_state;
1028 int i;
1029 int worst = 0;
1030 int severity;
1031 /*
1032 * Establish sequential order between the CPUs entering the machine
1033 * check handler.
1034 */
1035 int order;
1036 /*
1037 * If no_way_out gets set, there is no safe way to recover from this
1038 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1039 */
1040 int no_way_out = 0;
1041 /*
1042 * If kill_it gets set, we cannot safely return to the interrupted
1043 * context and may have to send SIGBUS to the current process.
1044 */
1045 int kill_it = 0;
1046 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1047 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1048 char *msg = "Unknown";
1049 u64 recover_paddr = ~0ull;
1050 int flags = MF_ACTION_REQUIRED;
1051
1052 prev_state = ist_enter(regs);
1053
1054 this_cpu_inc(mce_exception_count);
1055
1056 if (!cfg->banks)
1057 goto out;
1058
1059 mce_gather_info(&m, regs);
1060
1061 final = this_cpu_ptr(&mces_seen);
1062 *final = m;
1063
1064 memset(valid_banks, 0, sizeof(valid_banks));
1065 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1066
1067 barrier();
1068
1069 /*
1070 * When there is no restart IP we might need to kill or panic.
1071 * Assume the worst for now, but if we find the
1072 * severity is MCE_AR_SEVERITY we have other options.
1073 */
1074 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1075 kill_it = 1;
1076
1077 /*
1078 * Go through all the banks in exclusion of the other CPUs.
1079 * This way we don't report duplicated events on shared banks
1080 * because the first one to see it will clear it.
1081 */
1082 order = mce_start(&no_way_out);
1083 for (i = 0; i < cfg->banks; i++) {
1084 __clear_bit(i, toclear);
1085 if (!test_bit(i, valid_banks))
1086 continue;
1087 if (!mce_banks[i].ctl)
1088 continue;
1089
1090 m.misc = 0;
1091 m.addr = 0;
1092 m.bank = i;
1093
1094 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1095 if ((m.status & MCI_STATUS_VAL) == 0)
1096 continue;
1097
1098 /*
1099 * Errors that are not uncorrected (or, with SER, not signaled) are
1100 * handled by machine_check_poll(). Leave them alone, unless this panics.
1101 */
1102 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1103 !no_way_out)
1104 continue;
1105
1106 /*
1107 * Set taint even when machine check was not enabled.
1108 */
1109 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1110
1111 severity = mce_severity(&m, cfg->tolerant, NULL, true);
1112
1113 /*
1114 * When the machine check was meant for the corrected/deferred handler,
1115 * don't touch it here, unless we're panicking.
1116 */
1117 if ((severity == MCE_KEEP_SEVERITY ||
1118 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1119 continue;
1120 __set_bit(i, toclear);
1121 if (severity == MCE_NO_SEVERITY) {
1122 /*
1123 * Machine check event was not enabled. Clear, but
1124 * ignore.
1125 */
1126 continue;
1127 }
1128
1129 mce_read_aux(&m, i);
1130
1131 /*
1132 * Action optional error. Queue address for later processing.
1133 * When the ring overflows we just ignore the AO error.
1134 * RED-PEN add some logging mechanism when
1135 * mce_usable_address() or mce_ring_add() fails.
1136 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
1137 */
1138 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1139 mce_ring_add(m.addr >> PAGE_SHIFT);
1140
1141 mce_log(&m);
1142
1143 if (severity > worst) {
1144 *final = m;
1145 worst = severity;
1146 }
1147 }
1148
1149 /* mce_clear_state will clear *final, save locally for use later */
1150 m = *final;
1151
1152 if (!no_way_out)
1153 mce_clear_state(toclear);
1154
1155 /*
1156 * Do most of the synchronization with other CPUs.
1157 * When there's any problem use only local no_way_out state.
1158 */
1159 if (mce_end(order) < 0)
1160 no_way_out = worst >= MCE_PANIC_SEVERITY;
1161
1162 /*
1163 * At insane "tolerant" levels we take no action. Otherwise
1164 * we only die if we have no other choice. For less serious
1165 * issues we try to recover, or limit damage to the current
1166 * process.
1167 */
1168 if (cfg->tolerant < 3) {
1169 if (no_way_out)
1170 mce_panic("Fatal machine check on current CPU", &m, msg);
1171 if (worst == MCE_AR_SEVERITY) {
1172 recover_paddr = m.addr;
1173 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1174 flags |= MF_MUST_KILL;
1175 } else if (kill_it) {
1176 force_sig(SIGBUS, current);
1177 }
1178 }
1179
1180 if (worst > 0)
1181 mce_report_event(regs);
1182 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1183 out:
1184 sync_core();
1185
1186 if (recover_paddr == ~0ull)
1187 goto done;
1188
1189 pr_err("Uncorrected hardware memory error in user-access at %llx",
1190 recover_paddr);
1191 /*
1192 * We must call memory_failure() here even if the current process is
1193 * doomed. We still need to mark the page as poisoned and alert any
1194 * other users of the page.
1195 */
1196 ist_begin_non_atomic(regs);
1197 local_irq_enable();
1198 if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
1199 pr_err("Memory error not recovered");
1200 force_sig(SIGBUS, current);
1201 }
1202 local_irq_disable();
1203 ist_end_non_atomic();
1204 done:
1205 ist_exit(regs, prev_state);
1206 }
1207 EXPORT_SYMBOL_GPL(do_machine_check);
1208
1209 #ifndef CONFIG_MEMORY_FAILURE
1210 int memory_failure(unsigned long pfn, int vector, int flags)
1211 {
1212 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1213 BUG_ON(flags & MF_ACTION_REQUIRED);
1214 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1215 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1216 pfn);
1217
1218 return 0;
1219 }
1220 #endif
1221
1222 /*
1223 * Action optional processing happens here (picking up
1224 * from the list of faulting pages that do_machine_check()
1225 * placed into the "ring").
1226 */
1227 static void mce_process_work(struct work_struct *dummy)
1228 {
1229 unsigned long pfn;
1230
1231 while (mce_ring_get(&pfn))
1232 memory_failure(pfn, MCE_VECTOR, 0);
1233 }
1234
1235 #ifdef CONFIG_X86_MCE_INTEL
1236 /**
1237 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1239 * @status: Event status information
1240 *
1241 * This function should be called by the thermal interrupt after the
1242 * event has been processed and the decision was made to log the event
1243 * further.
1244 *
1245 * The status parameter will be saved to the 'status' field of 'struct mce'
1246 * and historically has been the register value of the
1247 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1248 */
1249 void mce_log_therm_throt_event(__u64 status)
1250 {
1251 struct mce m;
1252
1253 mce_setup(&m);
1254 m.bank = MCE_THERMAL_BANK;
1255 m.status = status;
1256 mce_log(&m);
1257 }
1258 #endif /* CONFIG_X86_MCE_INTEL */
1259
1260 /*
1261 * Periodic polling timer for "silent" machine check errors. If the
1262 * poller finds an MCE, poll 2x faster. When the poller finds no more
1263 * errors, poll 2x slower (up to check_interval seconds).
1264 */
1265 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1266
1267 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1268 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1269
1270 static unsigned long mce_adjust_timer_default(unsigned long interval)
1271 {
1272 return interval;
1273 }
1274
1275 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1276
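/*
 * (Re)arm the per-CPU polling timer, pinned to this CPU, so that it fires
 * in roughly @interval jiffies, pulling a pending timer forward if it
 * would otherwise fire later.
 */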
1277 static void __restart_timer(struct timer_list *t, unsigned long interval)
1278 {
1279 unsigned long when = jiffies + interval;
1280 unsigned long flags;
1281
1282 local_irq_save(flags);
1283
1284 if (timer_pending(t)) {
1285 if (time_before(when, t->expires))
1286 mod_timer_pinned(t, when);
1287 } else {
1288 t->expires = round_jiffies(when);
1289 add_timer_on(t, smp_processor_id());
1290 }
1291
1292 local_irq_restore(flags);
1293 }
1294
1295 static void mce_timer_fn(unsigned long data)
1296 {
1297 struct timer_list *t = this_cpu_ptr(&mce_timer);
1298 int cpu = smp_processor_id();
1299 unsigned long iv;
1300
1301 WARN_ON(cpu != data);
1302
1303 iv = __this_cpu_read(mce_next_interval);
1304
1305 if (mce_available(this_cpu_ptr(&cpu_info))) {
1306 machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
1307
1308 if (mce_intel_cmci_poll()) {
1309 iv = mce_adjust_timer(iv);
1310 goto done;
1311 }
1312 }
1313
1314 /*
1315 * Alert userspace if needed. If we logged an MCE, reduce the polling
1316 * interval, otherwise increase the polling interval.
1317 */
1318 if (mce_notify_irq())
1319 iv = max(iv / 2, (unsigned long) HZ/100);
1320 else
1321 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1322
1323 done:
1324 __this_cpu_write(mce_next_interval, iv);
1325 __restart_timer(t, iv);
1326 }
1327
1328 /*
1329 * Ensure that the timer is firing in @interval from now.
1330 */
1331 void mce_timer_kick(unsigned long interval)
1332 {
1333 struct timer_list *t = this_cpu_ptr(&mce_timer);
1334 unsigned long iv = __this_cpu_read(mce_next_interval);
1335
1336 __restart_timer(t, interval);
1337
1338 if (interval < iv)
1339 __this_cpu_write(mce_next_interval, interval);
1340 }
1341
1342 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1343 static void mce_timer_delete_all(void)
1344 {
1345 int cpu;
1346
1347 for_each_online_cpu(cpu)
1348 del_timer_sync(&per_cpu(mce_timer, cpu));
1349 }
1350
1351 static void mce_do_trigger(struct work_struct *work)
1352 {
1353 call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
1354 }
1355
1356 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1357
1358 /*
1359 * Notify the user(s) about new machine check events.
1360 * Can be called from interrupt context, but not from machine check/NMI
1361 * context.
1362 */
1363 int mce_notify_irq(void)
1364 {
1365 /* Not more than two messages every minute */
1366 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1367
1368 if (test_and_clear_bit(0, &mce_need_notify)) {
1369 /* wake processes polling /dev/mcelog */
1370 wake_up_interruptible(&mce_chrdev_wait);
1371
1372 if (mce_helper[0])
1373 schedule_work(&mce_trigger_work);
1374
1375 if (__ratelimit(&ratelimit))
1376 pr_info(HW_ERR "Machine check events logged\n");
1377
1378 return 1;
1379 }
1380 return 0;
1381 }
1382 EXPORT_SYMBOL_GPL(mce_notify_irq);
1383
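/*
 * Allocate the global mce_banks[] array and enable every bank with a full
 * control mask by default; vendor quirks may override this later.
 */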
1384 static int __mcheck_cpu_mce_banks_init(void)
1385 {
1386 int i;
1387 u8 num_banks = mca_cfg.banks;
1388
1389 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1390 if (!mce_banks)
1391 return -ENOMEM;
1392
1393 for (i = 0; i < num_banks; i++) {
1394 struct mce_bank *b = &mce_banks[i];
1395
1396 b->ctl = -1ULL;
1397 b->init = 1;
1398 }
1399 return 0;
1400 }
1401
1402 /*
1403 * Initialize Machine Checks for a CPU.
1404 */
1405 static int __mcheck_cpu_cap_init(void)
1406 {
1407 unsigned b;
1408 u64 cap;
1409
1410 rdmsrl(MSR_IA32_MCG_CAP, cap);
1411
1412 b = cap & MCG_BANKCNT_MASK;
1413 if (!mca_cfg.banks)
1414 pr_info("CPU supports %d MCE banks\n", b);
1415
1416 if (b > MAX_NR_BANKS) {
1417 pr_warn("Using only %u machine check banks out of %u\n",
1418 MAX_NR_BANKS, b);
1419 b = MAX_NR_BANKS;
1420 }
1421
1422 /* Don't support asymmetric configurations today */
1423 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1424 mca_cfg.banks = b;
1425
1426 if (!mce_banks) {
1427 int err = __mcheck_cpu_mce_banks_init();
1428
1429 if (err)
1430 return err;
1431 }
1432
1433 /* Use accurate RIP reporting if available. */
1434 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1435 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1436
1437 if (cap & MCG_SER_P)
1438 mca_cfg.ser = true;
1439
1440 return 0;
1441 }
1442
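/*
 * Enable machine checking on this CPU: log anything left over from before
 * the last reset, set CR4.MCE and program the global and per-bank control
 * and status MSRs.
 */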
1443 static void __mcheck_cpu_init_generic(void)
1444 {
1445 enum mcp_flags m_fl = 0;
1446 mce_banks_t all_banks;
1447 u64 cap;
1448 int i;
1449
1450 if (!mca_cfg.bootlog)
1451 m_fl = MCP_DONTLOG;
1452
1453 /*
1454 * Log the machine checks left over from the previous reset.
1455 */
1456 bitmap_fill(all_banks, MAX_NR_BANKS);
1457 machine_check_poll(MCP_UC | m_fl, &all_banks);
1458
1459 set_in_cr4(X86_CR4_MCE);
1460
1461 rdmsrl(MSR_IA32_MCG_CAP, cap);
1462 if (cap & MCG_CTL_P)
1463 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1464
1465 for (i = 0; i < mca_cfg.banks; i++) {
1466 struct mce_bank *b = &mce_banks[i];
1467
1468 if (!b->init)
1469 continue;
1470 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1471 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1472 }
1473 }
1474
1475 /*
1476 * During IFU recovery, Sandy Bridge-EP 4S processors set the RIPV and
1477 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1478 * Vol 3B Table 15-20). But this confuses both the code that determines
1479 * whether the machine check occurred in kernel or user mode, and also
1480 * the severity assessment code. Pretend that EIPV was set, and take the
1481 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1482 */
1483 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1484 {
1485 if (bank != 0)
1486 return;
1487 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1488 return;
1489 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1490 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1491 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1492 MCACOD)) !=
1493 (MCI_STATUS_UC|MCI_STATUS_EN|
1494 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1495 MCI_STATUS_AR|MCACOD_INSTR))
1496 return;
1497
1498 m->mcgstatus |= MCG_STATUS_EIPV;
1499 m->ip = regs->ip;
1500 m->cs = regs->cs;
1501 }
1502
1503 /* Add per CPU specific workarounds here */
1504 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1505 {
1506 struct mca_config *cfg = &mca_cfg;
1507
1508 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1509 pr_info("unknown CPU type - not enabling MCE support\n");
1510 return -EOPNOTSUPP;
1511 }
1512
1513 /* This should be disabled by the BIOS, but isn't always */
1514 if (c->x86_vendor == X86_VENDOR_AMD) {
1515 if (c->x86 == 15 && cfg->banks > 4) {
1516 /*
1517 * disable GART TBL walk error reporting, which
1518 * trips off incorrectly with the IOMMU & 3ware
1519 * & Cerberus:
1520 */
1521 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1522 }
1523 if (c->x86 <= 17 && cfg->bootlog < 0) {
1524 /*
1525 * Lots of broken BIOS around that don't clear them
1526 * by default and leave crap in there. Don't log:
1527 */
1528 cfg->bootlog = 0;
1529 }
1530 /*
1531 * Various K7s with broken bank 0 around. Always disable
1532 * by default.
1533 */
1534 if (c->x86 == 6 && cfg->banks > 0)
1535 mce_banks[0].ctl = 0;
1536
1537 /*
1538 * Turn off MC4_MISC thresholding banks on those models since
1539 * they're not supported there.
1540 */
1541 if (c->x86 == 0x15 &&
1542 (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1543 int i;
1544 u64 hwcr;
1545 bool need_toggle;
1546 u32 msrs[] = {
1547 0x00000413, /* MC4_MISC0 */
1548 0xc0000408, /* MC4_MISC1 */
1549 };
1550
1551 rdmsrl(MSR_K7_HWCR, hwcr);
1552
1553 /* McStatusWrEn has to be set */
1554 need_toggle = !(hwcr & BIT(18));
1555
1556 if (need_toggle)
1557 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1558
1559 /* Clear CntP bit safely */
1560 for (i = 0; i < ARRAY_SIZE(msrs); i++)
1561 msr_clear_bit(msrs[i], 62);
1562
1563 /* restore old settings */
1564 if (need_toggle)
1565 wrmsrl(MSR_K7_HWCR, hwcr);
1566 }
1567 }
1568
1569 if (c->x86_vendor == X86_VENDOR_INTEL) {
1570 /*
1571 * SDM documents that on family 6 bank 0 should not be written
1572 * because it aliases to another special BIOS controlled
1573 * register.
1574 * But it's not aliased anymore on model 0x1a+.
1575 * Don't ignore bank 0 completely because there could be a
1576 * valid event later, merely don't write CTL0.
1577 */
1578
1579 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1580 mce_banks[0].init = 0;
1581
1582 /*
1583 * All newer Intel systems support MCE broadcasting. Enable
1584 * synchronization with a one second timeout.
1585 */
1586 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1587 cfg->monarch_timeout < 0)
1588 cfg->monarch_timeout = USEC_PER_SEC;
1589
1590 /*
1591 * There are also broken BIOSes on some Pentium M and
1592 * earlier systems:
1593 */
1594 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1595 cfg->bootlog = 0;
1596
1597 if (c->x86 == 6 && c->x86_model == 45)
1598 quirk_no_way_out = quirk_sandybridge_ifu;
1599 }
1600 if (cfg->monarch_timeout < 0)
1601 cfg->monarch_timeout = 0;
1602 if (cfg->bootlog != 0)
1603 cfg->panic_timeout = 30;
1604
1605 return 0;
1606 }
1607
1608 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1609 {
1610 if (c->x86 != 5)
1611 return 0;
1612
1613 switch (c->x86_vendor) {
1614 case X86_VENDOR_INTEL:
1615 intel_p5_mcheck_init(c);
1616 return 1;
1617 break;
1618 case X86_VENDOR_CENTAUR:
1619 winchip_mcheck_init(c);
1620 return 1;
1621 break;
1622 }
1623
1624 return 0;
1625 }
1626
1627 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1628 {
1629 switch (c->x86_vendor) {
1630 case X86_VENDOR_INTEL:
1631 mce_intel_feature_init(c);
1632 mce_adjust_timer = cmci_intel_adjust_timer;
1633 break;
1634 case X86_VENDOR_AMD:
1635 mce_amd_feature_init(c);
1636 break;
1637 default:
1638 break;
1639 }
1640 }
1641
1642 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1643 {
1644 unsigned long iv = check_interval * HZ;
1645
1646 if (mca_cfg.ignore_ce || !iv)
1647 return;
1648
1649 per_cpu(mce_next_interval, cpu) = iv;
1650
1651 t->expires = round_jiffies(jiffies + iv);
1652 add_timer_on(t, cpu);
1653 }
1654
1655 static void __mcheck_cpu_init_timer(void)
1656 {
1657 struct timer_list *t = this_cpu_ptr(&mce_timer);
1658 unsigned int cpu = smp_processor_id();
1659
1660 setup_timer(t, mce_timer_fn, cpu);
1661 mce_start_timer(cpu, t);
1662 }
1663
1664 /* Handle unconfigured int18 (should never happen) */
1665 static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1666 {
1667 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1668 smp_processor_id());
1669 }
1670
1671 /* Call the installed machine check handler for this CPU setup. */
1672 void (*machine_check_vector)(struct pt_regs *, long error_code) =
1673 unexpected_machine_check;
1674
1675 /*
1676 * Called for each booted CPU to set up machine checks.
1677 * Must be called with preempt off:
1678 */
1679 void mcheck_cpu_init(struct cpuinfo_x86 *c)
1680 {
1681 if (mca_cfg.disabled)
1682 return;
1683
1684 if (__mcheck_cpu_ancient_init(c))
1685 return;
1686
1687 if (!mce_available(c))
1688 return;
1689
1690 if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1691 mca_cfg.disabled = true;
1692 return;
1693 }
1694
1695 machine_check_vector = do_machine_check;
1696
1697 __mcheck_cpu_init_generic();
1698 __mcheck_cpu_init_vendor(c);
1699 __mcheck_cpu_init_timer();
1700 INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
1701 init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
1702 }
1703
1704 /*
1705 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1706 */
1707
1708 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1709 static int mce_chrdev_open_count; /* #times opened */
1710 static int mce_chrdev_open_exclu; /* already open exclusive? */
1711
1712 static int mce_chrdev_open(struct inode *inode, struct file *file)
1713 {
1714 spin_lock(&mce_chrdev_state_lock);
1715
1716 if (mce_chrdev_open_exclu ||
1717 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1718 spin_unlock(&mce_chrdev_state_lock);
1719
1720 return -EBUSY;
1721 }
1722
1723 if (file->f_flags & O_EXCL)
1724 mce_chrdev_open_exclu = 1;
1725 mce_chrdev_open_count++;
1726
1727 spin_unlock(&mce_chrdev_state_lock);
1728
1729 return nonseekable_open(inode, file);
1730 }
1731
1732 static int mce_chrdev_release(struct inode *inode, struct file *file)
1733 {
1734 spin_lock(&mce_chrdev_state_lock);
1735
1736 mce_chrdev_open_count--;
1737 mce_chrdev_open_exclu = 0;
1738
1739 spin_unlock(&mce_chrdev_state_lock);
1740
1741 return 0;
1742 }
1743
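/*
 * Sample the local TSC on each CPU; mce_chrdev_read() compares these
 * snapshots against each record's tsc to pick up entries that were still
 * being written when the main copy loop ran.
 */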
1744 static void collect_tscs(void *data)
1745 {
1746 unsigned long *cpu_tsc = (unsigned long *)data;
1747
1748 rdtscll(cpu_tsc[smp_processor_id()]);
1749 }
1750
1751 static int mce_apei_read_done;
1752
1753 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1754 static int __mce_read_apei(char __user **ubuf, size_t usize)
1755 {
1756 int rc;
1757 u64 record_id;
1758 struct mce m;
1759
1760 if (usize < sizeof(struct mce))
1761 return -EINVAL;
1762
1763 rc = apei_read_mce(&m, &record_id);
1764 /* Error or no more MCE record */
1765 if (rc <= 0) {
1766 mce_apei_read_done = 1;
1767 /*
1768 * When ERST is disabled, mce_chrdev_read() should return
1769 * "no record" instead of "no device."
1770 */
1771 if (rc == -ENODEV)
1772 return 0;
1773 return rc;
1774 }
1775 rc = -EFAULT;
1776 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1777 return rc;
1778 /*
1779 * In fact, we should have cleared the record only after it has
1780 * been flushed to disk or sent to the network by
1781 * /sbin/mcelog, but we have no interface to support that now,
1782 * so just clear it to avoid duplication.
1783 */
1784 rc = apei_clear_mce(record_id);
1785 if (rc) {
1786 mce_apei_read_done = 1;
1787 return rc;
1788 }
1789 *ubuf += sizeof(struct mce);
1790
1791 return 0;
1792 }
1793
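/*
 * Read the MCE log to userspace and clear it: APEI/ERST records from the
 * previous boot first, then the in-memory mcelog buffer. Only full-sized
 * reads of the whole buffer are supported.
 */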
1794 static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1795 size_t usize, loff_t *off)
1796 {
1797 char __user *buf = ubuf;
1798 unsigned long *cpu_tsc;
1799 unsigned prev, next;
1800 int i, err;
1801
1802 cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1803 if (!cpu_tsc)
1804 return -ENOMEM;
1805
1806 mutex_lock(&mce_chrdev_read_mutex);
1807
1808 if (!mce_apei_read_done) {
1809 err = __mce_read_apei(&buf, usize);
1810 if (err || buf != ubuf)
1811 goto out;
1812 }
1813
1814 next = rcu_dereference_check_mce(mcelog.next);
1815
1816 /* Only supports full reads right now */
1817 err = -EINVAL;
1818 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1819 goto out;
1820
1821 err = 0;
1822 prev = 0;
1823 do {
1824 for (i = prev; i < next; i++) {
1825 unsigned long start = jiffies;
1826 struct mce *m = &mcelog.entry[i];
1827
1828 while (!m->finished) {
1829 if (time_after_eq(jiffies, start + 2)) {
1830 memset(m, 0, sizeof(*m));
1831 goto timeout;
1832 }
1833 cpu_relax();
1834 }
1835 smp_rmb();
1836 err |= copy_to_user(buf, m, sizeof(*m));
1837 buf += sizeof(*m);
1838 timeout:
1839 ;
1840 }
1841
1842 memset(mcelog.entry + prev, 0,
1843 (next - prev) * sizeof(struct mce));
1844 prev = next;
1845 next = cmpxchg(&mcelog.next, prev, 0);
1846 } while (next != prev);
1847
1848 synchronize_sched();
1849
1850 /*
1851 * Collect entries that were still getting written before the
1852 * synchronize.
1853 */
1854 on_each_cpu(collect_tscs, cpu_tsc, 1);
1855
1856 for (i = next; i < MCE_LOG_LEN; i++) {
1857 struct mce *m = &mcelog.entry[i];
1858
1859 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1860 err |= copy_to_user(buf, m, sizeof(*m));
1861 smp_rmb();
1862 buf += sizeof(*m);
1863 memset(m, 0, sizeof(*m));
1864 }
1865 }
1866
1867 if (err)
1868 err = -EFAULT;
1869
1870 out:
1871 mutex_unlock(&mce_chrdev_read_mutex);
1872 kfree(cpu_tsc);
1873
1874 return err ? err : buf - ubuf;
1875 }
1876
1877 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1878 {
1879 poll_wait(file, &mce_chrdev_wait, wait);
1880 if (rcu_access_index(mcelog.next))
1881 return POLLIN | POLLRDNORM;
1882 if (!mce_apei_read_done && apei_check_mce())
1883 return POLLIN | POLLRDNORM;
1884 return 0;
1885 }
1886
1887 static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1888 unsigned long arg)
1889 {
1890 int __user *p = (int __user *)arg;
1891
1892 if (!capable(CAP_SYS_ADMIN))
1893 return -EPERM;
1894
1895 switch (cmd) {
1896 case MCE_GET_RECORD_LEN:
1897 return put_user(sizeof(struct mce), p);
1898 case MCE_GET_LOG_LEN:
1899 return put_user(MCE_LOG_LEN, p);
1900 case MCE_GETCLEAR_FLAGS: {
1901 unsigned flags;
1902
1903 do {
1904 flags = mcelog.flags;
1905 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
1906
1907 return put_user(flags, p);
1908 }
1909 default:
1910 return -ENOTTY;
1911 }
1912 }
1913
1914 static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1915 size_t usize, loff_t *off);
1916
1917 void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1918 const char __user *ubuf,
1919 size_t usize, loff_t *off))
1920 {
1921 mce_write = fn;
1922 }
1923 EXPORT_SYMBOL_GPL(register_mce_write_callback);
1924
1925 ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1926 size_t usize, loff_t *off)
1927 {
1928 if (mce_write)
1929 return mce_write(filp, ubuf, usize, off);
1930 else
1931 return -EINVAL;
1932 }
1933
1934 static const struct file_operations mce_chrdev_ops = {
1935 .open = mce_chrdev_open,
1936 .release = mce_chrdev_release,
1937 .read = mce_chrdev_read,
1938 .write = mce_chrdev_write,
1939 .poll = mce_chrdev_poll,
1940 .unlocked_ioctl = mce_chrdev_ioctl,
1941 .llseek = no_llseek,
1942 };
1943
1944 static struct miscdevice mce_chrdev_device = {
1945 MISC_MCELOG_MINOR,
1946 "mcelog",
1947 &mce_chrdev_ops,
1948 };
1949
1950 static void __mce_disable_bank(void *arg)
1951 {
1952 int bank = *((int *)arg);
1953 __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
1954 cmci_disable_bank(bank);
1955 }
1956
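/*
 * Disable CMCI and periodic polling of corrected errors for @bank on all
 * CPUs, e.g. when firmware-first (GHES) handling owns the bank; see the
 * comment at mce_banks_ce_disabled.
 */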
1957 void mce_disable_bank(int bank)
1958 {
1959 if (bank >= mca_cfg.banks) {
1960 pr_warn(FW_BUG
1961 "Ignoring request to disable invalid MCA bank %d.\n",
1962 bank);
1963 return;
1964 }
1965 set_bit(bank, mce_banks_ce_disabled);
1966 on_each_cpu(__mce_disable_bank, &bank, 1);
1967 }
1968
1969 /*
1970 * mce=off Disables machine check
1971 * mce=no_cmci Disables CMCI
1972 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1973 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1974 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1975 * monarchtimeout is how long to wait for other CPUs on machine
1976 * check, or 0 to not wait
1977 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1978 * mce=nobootlog Don't log MCEs from before booting.
1979 * mce=bios_cmci_threshold Don't program the CMCI threshold
1980 */
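/*
 * Example (derived from the parsing below): "mce=2,500000" sets
 * tolerant=2 and monarch_timeout to 500000 us (0.5 s).
 */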
1981 static int __init mcheck_enable(char *str)
1982 {
1983 struct mca_config *cfg = &mca_cfg;
1984
1985 if (*str == 0) {
1986 enable_p5_mce();
1987 return 1;
1988 }
1989 if (*str == '=')
1990 str++;
1991 if (!strcmp(str, "off"))
1992 cfg->disabled = true;
1993 else if (!strcmp(str, "no_cmci"))
1994 cfg->cmci_disabled = true;
1995 else if (!strcmp(str, "dont_log_ce"))
1996 cfg->dont_log_ce = true;
1997 else if (!strcmp(str, "ignore_ce"))
1998 cfg->ignore_ce = true;
1999 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2000 cfg->bootlog = (str[0] == 'b');
2001 else if (!strcmp(str, "bios_cmci_threshold"))
2002 cfg->bios_cmci_threshold = true;
2003 else if (isdigit(str[0])) {
2004 get_option(&str, &(cfg->tolerant));
2005 if (*str == ',') {
2006 ++str;
2007 get_option(&str, &(cfg->monarch_timeout));
2008 }
2009 } else {
2010 pr_info("mce argument %s ignored. Please use /sys\n", str);
2011 return 0;
2012 }
2013 return 1;
2014 }
2015 __setup("mce", mcheck_enable);
2016
2017 int __init mcheck_init(void)
2018 {
2019 mcheck_intel_therm_init();
2020
2021 return 0;
2022 }
2023
2024 /*
2025 * mce_syscore: PM support
2026 */
2027
2028 /*
2029 * Disable machine checks on suspend and shutdown. We can't really handle
2030 * them later.
2031 */
2032 static int mce_disable_error_reporting(void)
2033 {
2034 int i;
2035
2036 for (i = 0; i < mca_cfg.banks; i++) {
2037 struct mce_bank *b = &mce_banks[i];
2038
2039 if (b->init)
2040 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2041 }
2042 return 0;
2043 }
2044
2045 static int mce_syscore_suspend(void)
2046 {
2047 return mce_disable_error_reporting();
2048 }
2049
2050 static void mce_syscore_shutdown(void)
2051 {
2052 mce_disable_error_reporting();
2053 }
2054
2055 /*
2056 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2057 * Only one CPU is active at this time, the others get re-added later using
2058 * CPU hotplug:
2059 */
2060 static void mce_syscore_resume(void)
2061 {
2062 __mcheck_cpu_init_generic();
2063 __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2064 }
2065
2066 static struct syscore_ops mce_syscore_ops = {
2067 .suspend = mce_syscore_suspend,
2068 .shutdown = mce_syscore_shutdown,
2069 .resume = mce_syscore_resume,
2070 };
2071
2072 /*
2073 * mce_device: Sysfs support
2074 */
2075
2076 static void mce_cpu_restart(void *data)
2077 {
2078 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2079 return;
2080 __mcheck_cpu_init_generic();
2081 __mcheck_cpu_init_timer();
2082 }
2083
2084 /* Reinit MCEs after user configuration changes */
2085 static void mce_restart(void)
2086 {
2087 mce_timer_delete_all();
2088 on_each_cpu(mce_cpu_restart, NULL, 1);
2089 }
2090
2091 /* Toggle features for corrected errors */
2092 static void mce_disable_cmci(void *data)
2093 {
2094 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2095 return;
2096 cmci_clear();
2097 }
2098
2099 static void mce_enable_ce(void *all)
2100 {
2101 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2102 return;
2103 cmci_reenable();
2104 cmci_recheck();
2105 if (all)
2106 __mcheck_cpu_init_timer();
2107 }
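/*
 * The "all" argument selects whether the per-CPU polling timer is restarted
 * as well: set_ignore_ce() passes (void *)1 because it previously deleted
 * the timers, while set_cmci_disabled() passes NULL since it only toggles
 * CMCI.
 */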
2108
2109 static struct bus_type mce_subsys = {
2110 .name = "machinecheck",
2111 .dev_name = "machinecheck",
2112 };
2113
2114 DEFINE_PER_CPU(struct device *, mce_device);
2115
2116 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2117
2118 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2119 {
2120 return container_of(attr, struct mce_bank, attr);
2121 }
2122
2123 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2124 char *buf)
2125 {
2126 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2127 }
2128
2129 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2130 const char *buf, size_t size)
2131 {
2132 u64 new;
2133
2134 if (kstrtou64(buf, 0, &new) < 0)
2135 return -EINVAL;
2136
2137 attr_to_bank(attr)->ctl = new;
2138 mce_restart();
2139
2140 return size;
2141 }
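/*
 * These handlers back the per-bank sysfs attributes created in
 * mce_init_banks() below. For example, writing 0 to a "bank<N>" file
 * (typically something like
 * /sys/devices/system/machinecheck/machinecheck0/bank2; the exact path
 * depends on the sysfs layout) stores 0 in that bank's ctl field and
 * triggers mce_restart().
 */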
2142
2143 static ssize_t
2144 show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2145 {
2146 strcpy(buf, mce_helper);
2147 strcat(buf, "\n");
2148 return strlen(mce_helper) + 1;
2149 }
2150
2151 static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2152 const char *buf, size_t siz)
2153 {
2154 char *p;
2155
2156 strncpy(mce_helper, buf, sizeof(mce_helper));
2157 mce_helper[sizeof(mce_helper)-1] = 0;
2158 p = strchr(mce_helper, '\n');
2159
2160 if (p)
2161 *p = 0;
2162
2163 return strlen(mce_helper) + !!p;
2164 }
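/*
 * Example (hypothetical helper path): writing "/usr/sbin/mce-helper" to the
 * per-CPU "trigger" attribute stores the program path in mce_helper[], which
 * is used as the user mode helper invoked on machine check events.
 */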
2165
2166 static ssize_t set_ignore_ce(struct device *s,
2167 struct device_attribute *attr,
2168 const char *buf, size_t size)
2169 {
2170 u64 new;
2171
2172 if (kstrtou64(buf, 0, &new) < 0)
2173 return -EINVAL;
2174
2175 if (mca_cfg.ignore_ce ^ !!new) {
2176 if (new) {
2177 /* disable ce features */
2178 mce_timer_delete_all();
2179 on_each_cpu(mce_disable_cmci, NULL, 1);
2180 mca_cfg.ignore_ce = true;
2181 } else {
2182 /* enable ce features */
2183 mca_cfg.ignore_ce = false;
2184 on_each_cpu(mce_enable_ce, (void *)1, 1);
2185 }
2186 }
2187 return size;
2188 }
2189
2190 static ssize_t set_cmci_disabled(struct device *s,
2191 struct device_attribute *attr,
2192 const char *buf, size_t size)
2193 {
2194 u64 new;
2195
2196 if (kstrtou64(buf, 0, &new) < 0)
2197 return -EINVAL;
2198
2199 if (mca_cfg.cmci_disabled ^ !!new) {
2200 if (new) {
2201 /* disable cmci */
2202 on_each_cpu(mce_disable_cmci, NULL, 1);
2203 mca_cfg.cmci_disabled = true;
2204 } else {
2205 /* enable cmci */
2206 mca_cfg.cmci_disabled = false;
2207 on_each_cpu(mce_enable_ce, NULL, 1);
2208 }
2209 }
2210 return size;
2211 }
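/*
 * The two toggles above differ in scope: cmci_disabled only turns CMCI
 * off/on, while ignore_ce additionally deletes the polling timers, so
 * corrected events are neither polled nor signalled while it is set.
 */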
2212
2213 static ssize_t store_int_with_restart(struct device *s,
2214 struct device_attribute *attr,
2215 const char *buf, size_t size)
2216 {
2217 ssize_t ret = device_store_int(s, attr, buf, size);
2218 mce_restart();
2219 return ret;
2220 }
2221
2222 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2223 static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2224 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2225 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2226
2227 static struct dev_ext_attribute dev_attr_check_interval = {
2228 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2229 &check_interval
2230 };
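/*
 * check_interval is the corrected-error polling period defined earlier in
 * this file; routing writes through store_int_with_restart() makes the
 * per-CPU timers pick up a new value immediately via mce_restart().
 */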
2231
2232 static struct dev_ext_attribute dev_attr_ignore_ce = {
2233 __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2234 &mca_cfg.ignore_ce
2235 };
2236
2237 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2238 __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2239 &mca_cfg.cmci_disabled
2240 };
2241
2242 static struct device_attribute *mce_device_attrs[] = {
2243 &dev_attr_tolerant.attr,
2244 &dev_attr_check_interval.attr,
2245 &dev_attr_trigger,
2246 &dev_attr_monarch_timeout.attr,
2247 &dev_attr_dont_log_ce.attr,
2248 &dev_attr_ignore_ce.attr,
2249 &dev_attr_cmci_disabled.attr,
2250 NULL
2251 };
2252
2253 static cpumask_var_t mce_device_initialized;
2254
2255 static void mce_device_release(struct device *dev)
2256 {
2257 kfree(dev);
2258 }
2259
2260 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
2261 static int mce_device_create(unsigned int cpu)
2262 {
2263 struct device *dev;
2264 int err;
2265 int i, j;
2266
2267 if (!mce_available(&boot_cpu_data))
2268 return -EIO;
2269
2270 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2271 if (!dev)
2272 return -ENOMEM;
2273 dev->id = cpu;
2274 dev->bus = &mce_subsys;
2275 dev->release = &mce_device_release;
2276
2277 err = device_register(dev);
2278 if (err) {
2279 put_device(dev);
2280 return err;
2281 }
2282
2283 for (i = 0; mce_device_attrs[i]; i++) {
2284 err = device_create_file(dev, mce_device_attrs[i]);
2285 if (err)
2286 goto error;
2287 }
2288 for (j = 0; j < mca_cfg.banks; j++) {
2289 err = device_create_file(dev, &mce_banks[j].attr);
2290 if (err)
2291 goto error2;
2292 }
2293 cpumask_set_cpu(cpu, mce_device_initialized);
2294 per_cpu(mce_device, cpu) = dev;
2295
2296 return 0;
2297 error2:
2298 while (--j >= 0)
2299 device_remove_file(dev, &mce_banks[j].attr);
2300 error:
2301 while (--i >= 0)
2302 device_remove_file(dev, mce_device_attrs[i]);
2303
2304 device_unregister(dev);
2305
2306 return err;
2307 }
2308
2309 static void mce_device_remove(unsigned int cpu)
2310 {
2311 struct device *dev = per_cpu(mce_device, cpu);
2312 int i;
2313
2314 if (!cpumask_test_cpu(cpu, mce_device_initialized))
2315 return;
2316
2317 for (i = 0; mce_device_attrs[i]; i++)
2318 device_remove_file(dev, mce_device_attrs[i]);
2319
2320 for (i = 0; i < mca_cfg.banks; i++)
2321 device_remove_file(dev, &mce_banks[i].attr);
2322
2323 device_unregister(dev);
2324 cpumask_clear_cpu(cpu, mce_device_initialized);
2325 per_cpu(mce_device, cpu) = NULL;
2326 }
2327
2328 /* Make sure there are no machine checks on offlined CPUs. */
2329 static void mce_disable_cpu(void *h)
2330 {
2331 unsigned long action = *(unsigned long *)h;
2332 int i;
2333
2334 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2335 return;
2336
2337 if (!(action & CPU_TASKS_FROZEN))
2338 cmci_clear();
2339 for (i = 0; i < mca_cfg.banks; i++) {
2340 struct mce_bank *b = &mce_banks[i];
2341
2342 if (b->init)
2343 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2344 }
2345 }
2346
2347 static void mce_reenable_cpu(void *h)
2348 {
2349 unsigned long action = *(unsigned long *)h;
2350 int i;
2351
2352 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2353 return;
2354
2355 if (!(action & CPU_TASKS_FROZEN))
2356 cmci_reenable();
2357 for (i = 0; i < mca_cfg.banks; i++) {
2358 struct mce_bank *b = &mce_banks[i];
2359
2360 if (b->init)
2361 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
2362 }
2363 }
2364
2365 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
2366 static int
2367 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2368 {
2369 unsigned int cpu = (unsigned long)hcpu;
2370 struct timer_list *t = &per_cpu(mce_timer, cpu);
2371
2372 switch (action & ~CPU_TASKS_FROZEN) {
2373 case CPU_ONLINE:
2374 mce_device_create(cpu);
2375 if (threshold_cpu_callback)
2376 threshold_cpu_callback(action, cpu);
2377 break;
2378 case CPU_DEAD:
2379 if (threshold_cpu_callback)
2380 threshold_cpu_callback(action, cpu);
2381 mce_device_remove(cpu);
2382 mce_intel_hcpu_update(cpu);
2383
2384 /* intentionally ignoring frozen here */
2385 if (!(action & CPU_TASKS_FROZEN))
2386 cmci_rediscover();
2387 break;
2388 case CPU_DOWN_PREPARE:
2389 smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2390 del_timer_sync(t);
2391 break;
2392 case CPU_DOWN_FAILED:
2393 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2394 mce_start_timer(cpu, t);
2395 break;
2396 }
2397
2398 return NOTIFY_OK;
2399 }
2400
2401 static struct notifier_block mce_cpu_notifier = {
2402 .notifier_call = mce_cpu_callback,
2403 };
2404
2405 static __init void mce_init_banks(void)
2406 {
2407 int i;
2408
2409 for (i = 0; i < mca_cfg.banks; i++) {
2410 struct mce_bank *b = &mce_banks[i];
2411 struct device_attribute *a = &b->attr;
2412
2413 sysfs_attr_init(&a->attr);
2414 a->attr.name = b->attrname;
2415 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2416
2417 a->attr.mode = 0644;
2418 a->show = show_bank;
2419 a->store = set_bank;
2420 }
2421 }
2422
2423 static __init int mcheck_init_device(void)
2424 {
2425 int err;
2426 int i = 0;
2427
2428 if (!mce_available(&boot_cpu_data)) {
2429 err = -EIO;
2430 goto err_out;
2431 }
2432
2433 if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2434 err = -ENOMEM;
2435 goto err_out;
2436 }
2437
2438 mce_init_banks();
2439
2440 err = subsys_system_register(&mce_subsys, NULL);
2441 if (err)
2442 goto err_out_mem;
2443
2444 cpu_notifier_register_begin();
2445 for_each_online_cpu(i) {
2446 err = mce_device_create(i);
2447 if (err) {
2448 /*
2449 * Register the notifier anyway (and do not unregister it) so
2450 * that we don't leave undeleted timers; see the notifier
2451 * callback above.
2452 */
2453 __register_hotcpu_notifier(&mce_cpu_notifier);
2454 cpu_notifier_register_done();
2455 goto err_device_create;
2456 }
2457 }
2458
2459 __register_hotcpu_notifier(&mce_cpu_notifier);
2460 cpu_notifier_register_done();
2461
2462 register_syscore_ops(&mce_syscore_ops);
2463
2464 /* register character device /dev/mcelog */
2465 err = misc_register(&mce_chrdev_device);
2466 if (err)
2467 goto err_register;
2468
2469 return 0;
2470
2471 err_register:
2472 unregister_syscore_ops(&mce_syscore_ops);
2473
2474 err_device_create:
2475 /*
2476 * We didn't keep track of which devices were created above, but
2477 * even if we had, the set of online cpus might have changed.
2478 * Play safe and remove for every possible cpu, since
2479 * mce_device_remove() will do the right thing.
2480 */
2481 for_each_possible_cpu(i)
2482 mce_device_remove(i);
2483
2484 err_out_mem:
2485 free_cpumask_var(mce_device_initialized);
2486
2487 err_out:
2488 pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
2489
2490 return err;
2491 }
2492 device_initcall_sync(mcheck_init_device);
2493
2494 /*
2495 * Old style boot options parsing. Only for compatibility.
2496 */
2497 static int __init mcheck_disable(char *str)
2498 {
2499 mca_cfg.disabled = true;
2500 return 1;
2501 }
2502 __setup("nomce", mcheck_disable);
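/*
 * "nomce" is therefore equivalent to booting with "mce=off": both simply
 * set mca_cfg.disabled.
 */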
2503
2504 #ifdef CONFIG_DEBUG_FS
2505 struct dentry *mce_get_debugfs_dir(void)
2506 {
2507 static struct dentry *dmce;
2508
2509 if (!dmce)
2510 dmce = debugfs_create_dir("mce", NULL);
2511
2512 return dmce;
2513 }
2514
2515 static void mce_reset(void)
2516 {
2517 cpu_missing = 0;
2518 atomic_set(&mce_fake_panicked, 0);
2519 atomic_set(&mce_executing, 0);
2520 atomic_set(&mce_callin, 0);
2521 atomic_set(&global_nwo, 0);
2522 }
2523
2524 static int fake_panic_get(void *data, u64 *val)
2525 {
2526 *val = fake_panic;
2527 return 0;
2528 }
2529
2530 static int fake_panic_set(void *data, u64 val)
2531 {
2532 mce_reset();
2533 fake_panic = val;
2534 return 0;
2535 }
2536
2537 DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2538 fake_panic_set, "%llu\n");
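/*
 * fake_panic is a test knob: when set through this attribute, the panic
 * path earlier in this file logs the message instead of actually panicking,
 * and mce_reset() clears the rendezvous counters so the test can be
 * repeated.
 */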
2539
2540 static int __init mcheck_debugfs_init(void)
2541 {
2542 struct dentry *dmce, *ffake_panic;
2543
2544 dmce = mce_get_debugfs_dir();
2545 if (!dmce)
2546 return -ENOMEM;
2547 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2548 &fake_panic_fops);
2549 if (!ffake_panic)
2550 return -ENOMEM;
2551
2552 return 0;
2553 }
2554 late_initcall(mcheck_debugfs_init);
2555 #endif