/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

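/*
 * mcelog.next may be read either under rcu_read_lock_sched() or with the
 * /dev/mcelog read mutex held; teach lockdep about both legal contexts.
 */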
#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/* CMCI storm detection filter */
static DEFINE_PER_CPU(unsigned long, mce_polled_error);

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static DEFINE_PER_CPU(struct work_struct, mce_work);

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature = MCE_LOG_SIGNATURE,
	.len = MCE_LOG_LEN,
	.recordlen = sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
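	/*
	 * Reserve a free slot: find the first unfinished entry at or after
	 * mcelog.next, then advance ->next past it with cmpxchg(). If
	 * another CPU won the race, retry with the updated index.
	 */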
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up, discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

static void drain_mcelog_buffer(void)
{
	unsigned int next, i, prev = 0;

	next = ACCESS_ONCE(mcelog.next);

	do {
		struct mce *m;

		/* drain what was logged during boot */
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			unsigned retries = 1;

			m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2*retries))
					retries++;

				cpu_relax();

				if (!m->finished && retries >= 4) {
					pr_err("skipping error being logged currently!\n");
					break;
				}
			}
			smp_rmb();
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
		}

		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
		prev = next;
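		/*
		 * Try to atomically reset mcelog.next to 0. If more entries
		 * were logged meanwhile, cmpxchg() returns the new count and
		 * the loop drains those as well.
		 */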
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);
}


void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
	drain_mcelog_buffer();
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
		 m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
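
/*
 * One slot is deliberately kept unused: start == end means "empty" while
 * (end + 1) % MCE_RING_SIZE == start means "full", so the two states stay
 * distinguishable without a separate element count.
 */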
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = this_cpu_ptr(&mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = this_cpu_ptr(&mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = this_cpu_ptr(&mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty())
		schedule_work(this_cpu_ptr(&mce_work));
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(this_cpu_ptr(&mce_irq_work));
}

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/*
		 * coming soon
		 */
		return false;
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
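		/*
		 * The masks below include bit 11, so a set bus/interconnect
		 * bit disqualifies the match, and they exclude bit 12, so
		 * the "filter" bit is ignored.
		 */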
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		this_cpu_write(mce_polled_error, 1);
		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * In the cases where we don't have a valid address after all,
		 * do not add it into the ring buffer.
		 */
		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
			if (m.status & MCI_STATUS_ADDRV) {
				mce_ring_add(m.addr >> PAGE_SHIFT);
				mce_schedule_work();
			}
		}

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}
		if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
		    MCE_PANIC_SEVERITY)
			ret = 1;
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU which entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. If any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For a UC error somewhere we let the CPU which detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * Need to save faulting physical address associated with a process
 * in the machine check handler some place where we can grab it back
 * later in mce_notify_process()
 */
#define MCE_INFO_MAX 16

struct mce_info {
	atomic_t inuse;
	struct task_struct *t;
	__u64 paddr;
	int restartable;
} mce_info[MCE_INFO_MAX];

static void mce_save_info(__u64 addr, int c)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
			mi->t = current;
			mi->paddr = addr;
			mi->restartable = c;
			return;
		}
	}

	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
}

static struct mce_info *mce_find_info(void)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
		if (atomic_read(&mi->inuse) && mi->t == current)
			return mi;
	return NULL;
}

static void mce_clear_info(struct mce_info *mi)
{
	atomic_set(&mi->inuse, 0);
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * If there is no restart IP we might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Errors which are neither uncorrected nor signaled are
		 * handled by machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
		    !no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When the machine check was for a corrected/deferred error,
		 * don't touch it, unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_add_ring fails.
		 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * At insane "tolerant" levels we take no action. Otherwise
	 * we only die if we have no other choice. For less serious
	 * issues we try to recover, or limit damage to the current
	 * process.
	 */
	if (cfg->tolerant < 3) {
		if (no_way_out)
			mce_panic("Fatal machine check on current CPU", &m, msg);
		if (worst == MCE_AR_SEVERITY) {
			/* schedule action before return to userland */
			mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
			set_thread_flag(TIF_MCE_NOTIFY);
		} else if (kill_it) {
			force_sig(SIGBUS, current);
		}
	}

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Called in the process context that was interrupted by the MCE and marked
 * with TIF_MCE_NOTIFY, just before returning to the erroneous userland.
 * This code is allowed to sleep.
 * Attempt possible recovery such as calling the high level VM handler to
 * process any corrupted pages, and kill/signal current process if required.
 * Action required errors are handled here.
 */
void mce_notify_process(void)
{
	unsigned long pfn;
	struct mce_info *mi = mce_find_info();
	int flags = MF_ACTION_REQUIRED;

	if (!mi)
		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
	pfn = mi->paddr >> PAGE_SHIFT;

	clear_thread_flag(TIF_MCE_NOTIFY);

	pr_err("Uncorrected hardware memory error in user-access at %llx",
	       mi->paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	if (!mi->restartable)
		flags |= MF_MUST_KILL;
	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered");
		force_sig(SIGBUS, current);
	}
	mce_clear_info(mi);
}

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the "ring").
 */
static void mce_process_work(struct work_struct *dummy)
{
	unsigned long pfn;

	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR, 0);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) MSR.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) =
	mce_adjust_timer_default;

static int cmc_error_seen(void)
{
	unsigned long *v = this_cpu_ptr(&mce_polled_error);

	return test_and_clear_bit(0, v);
}

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv;
	int notify;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				   this_cpu_ptr(&mce_poll_banks));
		mce_intel_cmci_poll();
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	iv = __this_cpu_read(mce_next_interval);
	notify = mce_notify_irq();
	notify |= cmc_error_seen();
	if (notify) {
		iv = max(iv / 2, (unsigned long) HZ/100);
	} else {
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
		iv = mce_adjust_timer(iv);
	}
	__this_cpu_write(mce_next_interval, iv);
	/* Might have become 0 after CMCI storm subsided */
	if (iv) {
		t->expires = jiffies + iv;
		add_timer_on(t, smp_processor_id());
	}
}

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long when = jiffies + interval;
	unsigned long iv = __this_cpu_read(mce_next_interval);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer_pinned(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}
	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		if (mce_helper[0])
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
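	/* mca_cfg.banks is still 0 on the boot CPU, so log the count once. */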
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;
	int i;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	set_in_cr4(X86_CR4_MCE);

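	/* If a global MCG_CTL register exists, enable all of its features. */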
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
			  MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 val, hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			for (i = 0; i < ARRAY_SIZE(msrs); i++) {
				rdmsrl(msrs[i], val);

				/* CntP bit set? */
				if (val & BIT_64(62)) {
					val &= ~BIT_64(62);
					wrmsrl(msrs[i], val);
				}
			}

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */

		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
| 1675 | } |
| 1676 | |
| 1677 | return 0; |
| 1678 | } |

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = mce_intel_adjust_timer;
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	per_cpu(mce_next_interval, cpu) = iv;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, cpu);
}
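
/*
 * Worked example (illustrative): check_interval defaults to five
 * minutes earlier in this file, so with HZ = 1000
 *
 *	iv = 300 * 1000 = 300000 jiffies,
 *
 * and the first poll fires about five minutes from now. round_jiffies()
 * aligns the expiry to a whole second so idle CPUs can batch wakeups.
 */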

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally the record would be cleared only after it has been
	 * flushed to disk or sent over the network by /sbin/mcelog, but
	 * there is no interface to support that yet, so clear it now to
	 * avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}

static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(*cpu_tsc), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}
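
/*
 * Illustrative userspace sketch (not kernel code; all names except the
 * ioctls and /dev/mcelog are hypothetical): how an mcelog-style consumer
 * drives the read protocol above. Only full reads of the whole log are
 * supported, so the buffer must cover log-length * record-length bytes,
 * both queried via the ioctls defined below.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <asm/mce.h>
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int loglen = 0, reclen = 0;
 *
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);
 *
 *	char *buf = malloc(loglen * reclen);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, buf, loglen * reclen);
 *		// n / reclen complete records were returned and cleared
 *	}
 */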

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
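
/*
 * Illustrative sketch of MCE_GETCLEAR_FLAGS semantics (hypothetical
 * userspace code; MCE_OVERFLOW is the bit *index* defined in the uapi
 * header): the cmpxchg loop above fetches and clears mcelog.flags
 * atomically, so two concurrent readers never both see the same bit.
 *
 *	int flags = 0;
 *
 *	if (ioctl(fd, MCE_GETCLEAR_FLAGS, &flags) == 0 &&
 *	    (flags & (1 << MCE_OVERFLOW)))
 *		fprintf(stderr, "mcelog buffer overflowed, records lost\n");
 */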

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);

ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
			 size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}
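
/*
 * Minimal sketch of a write-callback client; the in-tree mce-inject
 * module hooks /dev/mcelog writes this way (the body below is a
 * hypothetical simplification, not mce-inject's actual code):
 *
 *	static ssize_t inject_write(struct file *filp,
 *				    const char __user *ubuf,
 *				    size_t usize, loff_t *off)
 *	{
 *		struct mce m;
 *
 *		if (usize < sizeof(m) || copy_from_user(&m, ubuf, sizeof(m)))
 *			return -EFAULT;
 *		// ...validate and queue the record for injection...
 *		return sizeof(m);
 *	}
 *
 *	register_mce_write_callback(inject_write);
 */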

static const struct file_operations mce_chrdev_ops = {
	.open = mce_chrdev_open,
	.release = mce_chrdev_release,
	.read = mce_chrdev_read,
	.write = mce_chrdev_write,
	.poll = mce_chrdev_poll,
	.unlocked_ioctl = mce_chrdev_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);

	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}

/*
 * mce=off			Disables machine check
 * mce=no_cmci			Disables CMCI
 * mce=dont_log_ce		Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce		Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog			Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog		Don't log MCEs from before booting.
 * mce=bios_cmci_threshold	Don't program the CMCI threshold
 *
 * (See the command-line examples after mcheck_enable() below.)
 */
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = true;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = true;
	else if (isdigit(str[0])) {
		get_option(&str, &(cfg->tolerant));
		if (*str == ',') {
			++str;
			get_option(&str, &(cfg->monarch_timeout));
		}
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
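
/*
 * Illustrative kernel command lines accepted by mcheck_enable() above
 * (values are examples, not recommendations):
 *
 *	mce=off			disable machine checks completely
 *	mce=no_cmci		poll for corrected errors, never use CMCI
 *	mce=2,500000		tolerant=2, monarch timeout of 500000 us
 *	mce=nobootlog		don't log MCEs left over from before boot
 */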

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_syscore_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend = mce_syscore_suspend,
	.shutdown = mce_syscore_shutdown,
	.resume = mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name = "machinecheck",
	.dev_name = "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}
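
/*
 * Illustrative sysfs usage for the per-bank files created in
 * mce_init_banks() below; the path follows from the "machinecheck"
 * subsys/dev_name above and dev->id in mce_device_create():
 *
 *	# show the control mask of bank 0 as seen from CPU 0
 *	cat /sys/devices/system/machinecheck/machinecheck0/bank0
 *	# mask off all reporting in that bank, then reinit MCEs
 *	echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank0
 */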

static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
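
/*
 * Illustrative usage of the global attributes above (the helper path
 * is hypothetical):
 *
 *	# run a notifier program on machine check events
 *	echo /usr/local/sbin/mce-notify > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *	# poll for corrected errors once a minute
 *	echo 60 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *	# ignore corrected errors entirely (stops polling and CMCI)
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 */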

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per-CPU device init. All of the CPUs still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		mce_intel_hcpu_update(cpu);

		/* intentionally ignoring frozen here */
		if (!(action & CPU_TASKS_FROZEN))
			cmci_rediscover();
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		mce_start_timer(cpu, t);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name = b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode = 0644;
		a->show = show_bank;
		a->store = set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err) {
			/*
			 * Register notifier anyway (and do not unreg it) so
			 * that we don't leave undeleted timers, see notifier
			 * callback above.
			 */
			__register_hotcpu_notifier(&mce_cpu_notifier);
			cpu_notifier_register_done();
			goto err_device_create;
		}
	}

	__register_hotcpu_notifier(&mce_cpu_notifier);
	cpu_notifier_register_done();

	register_syscore_ops(&mce_syscore_ops);

	/* register character device /dev/mcelog */
	err = misc_register(&mce_chrdev_device);
	if (err)
		goto err_register;

	return 0;

err_register:
	unregister_syscore_ops(&mce_syscore_ops);

err_device_create:
	/*
	 * We didn't keep track of which devices were created above, but
	 * even if we had, the set of online cpus might have changed.
	 * Play safe and remove for every possible cpu, since
	 * mce_device_remove() will do the right thing.
	 */
	for_each_possible_cpu(i)
		mce_device_remove(i);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
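
/*
 * Illustrative debugfs usage (assumes debugfs mounted at the usual
 * /sys/kernel/debug; root can still write despite the 0444 mode
 * thanks to CAP_DAC_OVERRIDE):
 *
 *	# make mce_panic() go through the fake-panic path instead of
 *	# really panicking
 *	echo 1 > /sys/kernel/debug/mce/fake_panic
 */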

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
late_initcall(mcheck_debugfs_init);
#endif