on_each_cpu(): kill unused 'retry' parameter
[deliverable/linux.git] arch/x86/kernel/cpu/mcheck/mce_64.c
1 /*
2 * Machine check handler.
3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 * Rest from unknown author(s).
5 * 2004 Andi Kleen. Rewrote most of it.
6 */
7
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/string.h>
13 #include <linux/rcupdate.h>
14 #include <linux/kallsyms.h>
15 #include <linux/sysdev.h>
16 #include <linux/miscdevice.h>
17 #include <linux/fs.h>
18 #include <linux/capability.h>
19 #include <linux/cpu.h>
20 #include <linux/percpu.h>
21 #include <linux/poll.h>
22 #include <linux/thread_info.h>
23 #include <linux/ctype.h>
24 #include <linux/kmod.h>
25 #include <linux/kdebug.h>
26 #include <asm/processor.h>
27 #include <asm/msr.h>
28 #include <asm/mce.h>
29 #include <asm/uaccess.h>
30 #include <asm/smp.h>
31 #include <asm/idle.h>
32
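/* Minor number under which the /dev/mcelog misc character device is registered. */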
33 #define MISC_MCELOG_MINOR 227
34 #define NR_BANKS 6
35
36 atomic_t mce_entry;
37
38 static int mce_dont_init;
39
40 /*
41 * Tolerant levels:
42 * 0: always panic on uncorrected errors, log corrected errors
43 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
44 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
45 * 3: never panic or SIGBUS, log all errors (for testing only)
46 */
47 static int tolerant = 1;
48 static int banks;
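/* Per-bank MCi_CTL value written at init time; ~0UL enables every error
   type the bank implements (CPU quirks may clear individual bits). */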
49 static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
50 static unsigned long notify_user;
51 static int rip_msr;
52 static int mce_bootlog = -1;
53 static atomic_t mce_events;
54
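/* Optional user space helper run via call_usermodehelper() whenever new
   machine check events are logged; set through the sysfs "trigger" file. */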
55 static char trigger[128];
56 static char *trigger_argv[2] = { trigger, NULL };
57
58 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
59
60 /*
61 * Lockless MCE logging infrastructure.
62 * This avoids deadlocks on printk locks without having to break locks. Also
63 * separate MCEs from kernel messages to avoid bogus bug reports.
64 */
65
66 static struct mce_log mcelog = {
67 MCE_LOG_SIGNATURE,
68 MCE_LOG_LEN,
69 };
70
71 void mce_log(struct mce *mce)
72 {
73 unsigned next, entry;
74 atomic_inc(&mce_events);
75 mce->finished = 0;
76 wmb();
77 for (;;) {
78 entry = rcu_dereference(mcelog.next);
79 for (;;) {
80 /* When the buffer fills up discard new entries. Assume
81 that the earlier errors are the more interesting. */
82 if (entry >= MCE_LOG_LEN) {
83 set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
84 return;
85 }
86 /* Old left over entry. Skip. */
87 if (mcelog.entry[entry].finished) {
88 entry++;
89 continue;
90 }
91 break;
92 }
93 smp_rmb();
94 next = entry + 1;
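/* Claim the slot by advancing mcelog.next with cmpxchg; if another
   CPU raced us here, start over and look for a free slot again. */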
95 if (cmpxchg(&mcelog.next, entry, next) == entry)
96 break;
97 }
98 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
99 wmb();
100 mcelog.entry[entry].finished = 1;
101 wmb();
102
103 set_bit(0, &notify_user);
104 }
105
106 static void print_mce(struct mce *m)
107 {
108 printk(KERN_EMERG "\n"
109 KERN_EMERG "HARDWARE ERROR\n"
110 KERN_EMERG
111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
112 m->cpu, m->mcgstatus, m->bank, m->status);
113 if (m->ip) {
114 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
116 m->cs, m->ip);
117 if (m->cs == __KERNEL_CS)
118 print_symbol("{%s}", m->ip);
119 printk("\n");
120 }
121 printk(KERN_EMERG "TSC %Lx ", m->tsc);
122 if (m->addr)
123 printk("ADDR %Lx ", m->addr);
124 if (m->misc)
125 printk("MISC %Lx ", m->misc);
126 printk("\n");
127 printk(KERN_EMERG "This is not a software problem!\n");
128 printk(KERN_EMERG "Run through mcelog --ascii to decode "
129 "and contact your hardware vendor\n");
130 }
131
132 static void mce_panic(char *msg, struct mce *backup, unsigned long start)
133 {
134 int i;
135
136 oops_begin();
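/* Print every event logged since this machine check started, then the
   event being panicked on if it was not already among them. */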
137 for (i = 0; i < MCE_LOG_LEN; i++) {
138 unsigned long tsc = mcelog.entry[i].tsc;
139
140 if (time_before(tsc, start))
141 continue;
142 print_mce(&mcelog.entry[i]);
143 if (backup && mcelog.entry[i].tsc == backup->tsc)
144 backup = NULL;
145 }
146 if (backup)
147 print_mce(backup);
148 panic(msg);
149 }
150
151 static int mce_available(struct cpuinfo_x86 *c)
152 {
153 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
154 }
155
156 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
157 {
158 if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
159 m->ip = regs->ip;
160 m->cs = regs->cs;
161 } else {
162 m->ip = 0;
163 m->cs = 0;
164 }
165 if (rip_msr) {
166 /* Assume the RIP in the MSR is exact. Is this true? */
167 m->mcgstatus |= MCG_STATUS_EIPV;
168 rdmsrl(rip_msr, m->ip);
169 m->cs = 0;
170 }
171 }
172
173 /*
174 * The actual machine check handler
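 * (regs == NULL means we were called from the poll timer or from
 * mce_init(); error_code -1 logs any left-over events without a
 * timestamp, -2 clears the banks without logging them)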
175 */
176 void do_machine_check(struct pt_regs * regs, long error_code)
177 {
178 struct mce m, panicm;
179 u64 mcestart = 0;
180 int i;
181 int panicm_found = 0;
182 /*
183 * If no_way_out gets set, there is no safe way to recover from this
184 * MCE. If tolerant is cranked up, we'll try anyway.
185 */
186 int no_way_out = 0;
187 /*
188 * If kill_it gets set, there might be a way to recover from this
189 * error.
190 */
191 int kill_it = 0;
192
193 atomic_inc(&mce_entry);
194
195 if ((regs
196 && notify_die(DIE_NMI, "machine check", regs, error_code,
197 18, SIGKILL) == NOTIFY_STOP)
198 || !banks)
199 goto out2;
200
201 memset(&m, 0, sizeof(struct mce));
202 m.cpu = smp_processor_id();
203 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
204 /* if the restart IP is not valid, we're done for */
205 if (!(m.mcgstatus & MCG_STATUS_RIPV))
206 no_way_out = 1;
207
208 rdtscll(mcestart);
209 barrier();
210
211 for (i = 0; i < banks; i++) {
212 if (!bank[i])
213 continue;
214
215 m.misc = 0;
216 m.addr = 0;
217 m.bank = i;
218 m.tsc = 0;
219
220 rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
221 if ((m.status & MCI_STATUS_VAL) == 0)
222 continue;
223
224 if (m.status & MCI_STATUS_EN) {
225 /* if PCC was set, there's no way out */
226 no_way_out |= !!(m.status & MCI_STATUS_PCC);
227 /*
228 * If this error was uncorrectable and there was
229 * an overflow, we're in trouble. If no overflow,
230 * we might get away with just killing a task.
231 */
232 if (m.status & MCI_STATUS_UC) {
233 if (tolerant < 1 || m.status & MCI_STATUS_OVER)
234 no_way_out = 1;
235 kill_it = 1;
236 }
237 }
238
239 if (m.status & MCI_STATUS_MISCV)
240 rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
241 if (m.status & MCI_STATUS_ADDRV)
242 rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
243
244 mce_get_rip(&m, regs);
245 if (error_code >= 0)
246 rdtscll(m.tsc);
247 if (error_code != -2)
248 mce_log(&m);
249
250 /* Did this bank cause the exception? */
251 /* Assume that the bank with uncorrectable errors did it,
252 and that there is only a single one. */
253 if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
254 panicm = m;
255 panicm_found = 1;
256 }
257
258 add_taint(TAINT_MACHINE_CHECK);
259 }
260
261 /* Never do anything final in the polling timer */
262 if (!regs)
263 goto out;
264
265 /* If we didn't find an uncorrectable error, pick
266 the last one (shouldn't happen, just being safe). */
267 if (!panicm_found)
268 panicm = m;
269
270 /*
271 * If we have decided that we just CAN'T continue, and the user
272 * has not set tolerant to an insane level, give up and die.
273 */
274 if (no_way_out && tolerant < 3)
275 mce_panic("Machine check", &panicm, mcestart);
276
277 /*
278 * If the error seems to be unrecoverable, something should be
279 * done. Try to kill as little as possible. If we can kill just
280 * one task, do that. If the user has set the tolerance very
281 * high, don't try to do anything at all.
282 */
283 if (kill_it && tolerant < 3) {
284 int user_space = 0;
285
286 /*
287 * If the EIPV bit is set, it means the saved IP is the
288 * instruction which caused the MCE.
289 */
290 if (m.mcgstatus & MCG_STATUS_EIPV)
291 user_space = panicm.ip && (panicm.cs & 3);
292
293 /*
294 * If we know that the error was in user space, send a
295 * SIGBUS. Otherwise, panic if tolerance is low.
296 *
297 * do_exit() takes an awful lot of locks and has a slight
298 * risk of deadlocking.
299 */
300 if (user_space) {
301 do_exit(SIGBUS);
302 } else if (panic_on_oops || tolerant < 2) {
303 mce_panic("Uncorrected machine check",
304 &panicm, mcestart);
305 }
306 }
307
308 /* notify userspace ASAP */
309 set_thread_flag(TIF_MCE_NOTIFY);
310
311 out:
312 /* the last thing we do is clear state */
313 for (i = 0; i < banks; i++)
314 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
315 wrmsrl(MSR_IA32_MCG_STATUS, 0);
316 out2:
317 atomic_dec(&mce_entry);
318 }
319
320 #ifdef CONFIG_X86_MCE_INTEL
321 /***
322 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
323 * @cpu: The CPU on which the event occurred.
324 * @status: Event status information
325 *
326 * This function should be called by the thermal interrupt after the
327 * event has been processed and the decision was made to log the event
328 * further.
329 *
330 * The status parameter will be saved to the 'status' field of 'struct mce'
331 * and historically has been the register value of the
332 * MSR_IA32_THERMAL_STATUS (Intel) msr.
333 */
334 void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
335 {
336 struct mce m;
337
338 memset(&m, 0, sizeof(m));
339 m.cpu = cpu;
340 m.bank = MCE_THERMAL_BANK;
341 m.status = status;
342 rdtscll(m.tsc);
343 mce_log(&m);
344 }
345 #endif /* CONFIG_X86_MCE_INTEL */
346
347 /*
348 * Periodic polling timer for "silent" machine check errors. If the
349 * poller finds an MCE, poll 2x faster. When the poller finds no more
350 * errors, poll 2x slower (up to check_interval seconds).
351 */
352
353 static int check_interval = 5 * 60; /* 5 minutes */
354 static int next_interval; /* in jiffies */
355 static void mcheck_timer(struct work_struct *work);
356 static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
357
358 static void mcheck_check_cpu(void *info)
359 {
360 if (mce_available(&current_cpu_data))
361 do_machine_check(NULL, 0);
362 }
363
364 static void mcheck_timer(struct work_struct *work)
365 {
366 on_each_cpu(mcheck_check_cpu, NULL, 1);
367
368 /*
369 * Alert userspace if needed. If we logged an MCE, reduce the
370 * polling interval, otherwise increase the polling interval.
371 */
372 if (mce_notify_user()) {
373 next_interval = max(next_interval/2, HZ/100);
374 } else {
375 next_interval = min(next_interval * 2,
376 (int)round_jiffies_relative(check_interval*HZ));
377 }
378
379 schedule_delayed_work(&mcheck_work, next_interval);
380 }
381
382 /*
383 * This is only called from process context. This is where we do
384 * anything we need to alert userspace about new MCEs. This is called
385 * directly from the poller and also from entry.S and idle, thanks to
386 * TIF_MCE_NOTIFY.
387 */
388 int mce_notify_user(void)
389 {
390 clear_thread_flag(TIF_MCE_NOTIFY);
391 if (test_and_clear_bit(0, &notify_user)) {
392 static unsigned long last_print;
393 unsigned long now = jiffies;
394
395 wake_up_interruptible(&mce_wait);
396 if (trigger[0])
397 call_usermodehelper(trigger, trigger_argv, NULL,
398 UMH_NO_WAIT);
399
400 if (time_after_eq(now, last_print + (check_interval*HZ))) {
401 last_print = now;
402 printk(KERN_INFO "Machine check events logged\n");
403 }
404
405 return 1;
406 }
407 return 0;
408 }
409
410 /* see if the idle task needs to notify userspace */
411 static int
412 mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
413 {
414 /* IDLE_END should be safe - interrupts are back on */
415 if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
416 mce_notify_user();
417
418 return NOTIFY_OK;
419 }
420
421 static struct notifier_block mce_idle_notifier = {
422 .notifier_call = mce_idle_callback,
423 };
424
425 static __init int periodic_mcheck_init(void)
426 {
427 next_interval = check_interval * HZ;
428 if (next_interval)
429 schedule_delayed_work(&mcheck_work,
430 round_jiffies_relative(next_interval));
431 idle_notifier_register(&mce_idle_notifier);
432 return 0;
433 }
434 __initcall(periodic_mcheck_init);
435
436
437 /*
438 * Initialize Machine Checks for a CPU.
439 */
440 static void mce_init(void *dummy)
441 {
442 u64 cap;
443 int i;
444
445 rdmsrl(MSR_IA32_MCG_CAP, cap);
446 banks = cap & 0xff;
447 if (banks > NR_BANKS) {
448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
449 banks = NR_BANKS;
450 }
451 /* Use accurate RIP reporting if available. */
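/* (Bit 9 of MCG_CAP is MCG_EXT_P; bits 23:16 give the extended MSR
   count, and at least 9 are needed before MCG_EIP can be read.) */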
452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
453 rip_msr = MSR_IA32_MCG_EIP;
454
455 /* Log the machine checks left over from the previous reset.
456 This also clears all registers */
457 do_machine_check(NULL, mce_bootlog ? -1 : -2);
458
459 set_in_cr4(X86_CR4_MCE);
460
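/* MCG_CTL is present: enable all of its control bits. */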
461 if (cap & MCG_CTL_P)
462 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
463
464 for (i = 0; i < banks; i++) {
465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
467 }
468 }
469
470 /* Add per CPU specific workarounds here */
471 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
472 {
473 /* This should be disabled by the BIOS, but isn't always */
474 if (c->x86_vendor == X86_VENDOR_AMD) {
475 if (c->x86 == 15)
476 /* disable GART TBL walk error reporting, which trips off
477 incorrectly with the IOMMU & 3ware & Cerberus. */
478 clear_bit(10, &bank[4]);
479 if (c->x86 <= 17 && mce_bootlog < 0)
480 /* Lots of broken BIOS around that don't clear them
481 by default and leave crap in there. Don't log. */
482 mce_bootlog = 0;
483 }
484
485 }
486
487 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
488 {
489 switch (c->x86_vendor) {
490 case X86_VENDOR_INTEL:
491 mce_intel_feature_init(c);
492 break;
493 case X86_VENDOR_AMD:
494 mce_amd_feature_init(c);
495 break;
496 default:
497 break;
498 }
499 }
500
501 /*
502 * Called for each booted CPU to set up machine checks.
503 * Must be called with preempt off.
504 */
505 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
506 {
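/* mce_cpus records which CPUs have already been set up. */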
507 static cpumask_t mce_cpus = CPU_MASK_NONE;
508
509 mce_cpu_quirks(c);
510
511 if (mce_dont_init ||
512 cpu_test_and_set(smp_processor_id(), mce_cpus) ||
513 !mce_available(c))
514 return;
515
516 mce_init(NULL);
517 mce_cpu_features(c);
518 }
519
520 /*
521 * Character device to read and clear the MCE log.
522 */
523
524 static DEFINE_SPINLOCK(mce_state_lock);
525 static int open_count; /* #times opened */
526 static int open_exclu; /* already open exclusive? */
527
528 static int mce_open(struct inode *inode, struct file *file)
529 {
530 spin_lock(&mce_state_lock);
531
532 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
533 spin_unlock(&mce_state_lock);
534 return -EBUSY;
535 }
536
537 if (file->f_flags & O_EXCL)
538 open_exclu = 1;
539 open_count++;
540
541 spin_unlock(&mce_state_lock);
542
543 return nonseekable_open(inode, file);
544 }
545
546 static int mce_release(struct inode *inode, struct file *file)
547 {
548 spin_lock(&mce_state_lock);
549
550 open_count--;
551 open_exclu = 0;
552
553 spin_unlock(&mce_state_lock);
554
555 return 0;
556 }
557
558 static void collect_tscs(void *data)
559 {
560 unsigned long *cpu_tsc = (unsigned long *)data;
561
562 rdtscll(cpu_tsc[smp_processor_id()]);
563 }
564
565 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
566 loff_t *off)
567 {
568 unsigned long *cpu_tsc;
569 static DEFINE_MUTEX(mce_read_mutex);
570 unsigned next;
571 char __user *buf = ubuf;
572 int i, err;
573
574 cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
575 if (!cpu_tsc)
576 return -ENOMEM;
577
578 mutex_lock(&mce_read_mutex);
579 next = rcu_dereference(mcelog.next);
580
581 /* Only supports full reads right now */
582 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
583 mutex_unlock(&mce_read_mutex);
584 kfree(cpu_tsc);
585 return -EINVAL;
586 }
587
588 err = 0;
589 for (i = 0; i < next; i++) {
590 unsigned long start = jiffies;
591
592 while (!mcelog.entry[i].finished) {
593 if (time_after_eq(jiffies, start + 2)) {
594 memset(mcelog.entry + i, 0, sizeof(struct mce));
595 goto timeout;
596 }
597 cpu_relax();
598 }
599 smp_rmb();
600 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
601 buf += sizeof(struct mce);
602 timeout:
603 ;
604 }
605
606 memset(mcelog.entry, 0, next * sizeof(struct mce));
607 mcelog.next = 0;
608
609 synchronize_sched();
610
611 /*
612 * Collect entries that were still getting written before the
613 * synchronize.
614 */
615 on_each_cpu(collect_tscs, cpu_tsc, 1);
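/* Entries past the old 'next' index whose finished flag is set and
   whose TSC predates the value just sampled on their CPU were written
   by such racing loggers; copy them out and clear them as well. */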
616 for (i = next; i < MCE_LOG_LEN; i++) {
617 if (mcelog.entry[i].finished &&
618 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
619 err |= copy_to_user(buf, mcelog.entry+i,
620 sizeof(struct mce));
621 smp_rmb();
622 buf += sizeof(struct mce);
623 memset(&mcelog.entry[i], 0, sizeof(struct mce));
624 }
625 }
626 mutex_unlock(&mce_read_mutex);
627 kfree(cpu_tsc);
628 return err ? -EFAULT : buf - ubuf;
629 }
630
631 static unsigned int mce_poll(struct file *file, poll_table *wait)
632 {
633 poll_wait(file, &mce_wait, wait);
634 if (rcu_dereference(mcelog.next))
635 return POLLIN | POLLRDNORM;
636 return 0;
637 }
638
639 static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
640 {
641 int __user *p = (int __user *)arg;
642
643 if (!capable(CAP_SYS_ADMIN))
644 return -EPERM;
645 switch (cmd) {
646 case MCE_GET_RECORD_LEN:
647 return put_user(sizeof(struct mce), p);
648 case MCE_GET_LOG_LEN:
649 return put_user(MCE_LOG_LEN, p);
650 case MCE_GETCLEAR_FLAGS: {
651 unsigned flags;
652
653 do {
654 flags = mcelog.flags;
655 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
656 return put_user(flags, p);
657 }
658 default:
659 return -ENOTTY;
660 }
661 }
662
663 static const struct file_operations mce_chrdev_ops = {
664 .open = mce_open,
665 .release = mce_release,
666 .read = mce_read,
667 .poll = mce_poll,
668 .unlocked_ioctl = mce_ioctl,
669 };
670
671 static struct miscdevice mce_log_device = {
672 MISC_MCELOG_MINOR,
673 "mcelog",
674 &mce_chrdev_ops,
675 };
676
677 static unsigned long old_cr4 __initdata;
678
679 void __init stop_mce(void)
680 {
681 old_cr4 = read_cr4();
682 clear_in_cr4(X86_CR4_MCE);
683 }
684
685 void __init restart_mce(void)
686 {
687 if (old_cr4 & X86_CR4_MCE)
688 set_in_cr4(X86_CR4_MCE);
689 }
690
691 /*
692 * Old style boot options parsing. Only for compatibility.
693 */
694 static int __init mcheck_disable(char *str)
695 {
696 mce_dont_init = 1;
697 return 1;
698 }
699
700 /* mce=off disables machine check. Note you can re-enable it later
701 using sysfs.
702 mce=TOLERANCELEVEL (number, see above)
703 mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
704 mce=nobootlog Don't log MCEs from before booting. */
705 static int __init mcheck_enable(char *str)
706 {
707 if (!strcmp(str, "off"))
708 mce_dont_init = 1;
709 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
710 mce_bootlog = str[0] == 'b';
711 else if (isdigit(str[0]))
712 get_option(&str, &tolerant);
713 else
714 printk(KERN_INFO "mce= argument %s ignored. Please use /sys\n", str);
715 return 1;
716 }
717
718 __setup("nomce", mcheck_disable);
719 __setup("mce=", mcheck_enable);
720
721 /*
722 * Sysfs support
723 */
724
725 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
726 Only one CPU is active at this time, the others get readded later using
727 CPU hotplug. */
728 static int mce_resume(struct sys_device *dev)
729 {
730 mce_init(NULL);
731 return 0;
732 }
733
734 /* Reinit MCEs after user configuration changes */
735 static void mce_restart(void)
736 {
737 if (next_interval)
738 cancel_delayed_work(&mcheck_work);
739 /* Timer race is harmless here */
740 on_each_cpu(mce_init, NULL, 1);
741 next_interval = check_interval * HZ;
742 if (next_interval)
743 schedule_delayed_work(&mcheck_work,
744 round_jiffies_relative(next_interval));
745 }
746
747 static struct sysdev_class mce_sysclass = {
748 .resume = mce_resume,
749 .name = "machinecheck",
750 };
751
752 DEFINE_PER_CPU(struct sys_device, device_mce);
753
754 /* Why are there no generic functions for this? */
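/* ACCESSOR(name, var, start) generates show_/set_ handlers plus a 0644
   sysfs attribute: a write parses the new value into 'var' and then
   runs 'start' (usually mce_restart()) to apply it. */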
755 #define ACCESSOR(name, var, start) \
756 static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
757 return sprintf(buf, "%lx\n", (unsigned long)var); \
758 } \
759 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
760 char *end; \
761 unsigned long new = simple_strtoul(buf, &end, 0); \
762 if (end == buf) return -EINVAL; \
763 var = new; \
764 start; \
765 return end-buf; \
766 } \
767 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
768
769 /* TBD should generate these dynamically based on number of available banks */
770 ACCESSOR(bank0ctl,bank[0],mce_restart())
771 ACCESSOR(bank1ctl,bank[1],mce_restart())
772 ACCESSOR(bank2ctl,bank[2],mce_restart())
773 ACCESSOR(bank3ctl,bank[3],mce_restart())
774 ACCESSOR(bank4ctl,bank[4],mce_restart())
775 ACCESSOR(bank5ctl,bank[5],mce_restart())
776
777 static ssize_t show_trigger(struct sys_device *s, char *buf)
778 {
779 strcpy(buf, trigger);
780 strcat(buf, "\n");
781 return strlen(trigger) + 1;
782 }
783
784 static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
785 {
786 char *p;
787 int len;
788 strncpy(trigger, buf, sizeof(trigger));
789 trigger[sizeof(trigger)-1] = 0;
790 len = strlen(trigger);
791 p = strchr(trigger, '\n');
792 if (p) *p = 0;	/* strchr() may have found no newline */
793 return len;
794 }
795
796 static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
797 ACCESSOR(tolerant,tolerant,)
798 ACCESSOR(check_interval,check_interval,mce_restart())
799 static struct sysdev_attribute *mce_attributes[] = {
800 &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
801 &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
802 &attr_tolerant, &attr_check_interval, &attr_trigger,
803 NULL
804 };
805
806 static cpumask_t mce_device_initialized = CPU_MASK_NONE;
807
808 /* Per cpu sysdev init. All of the cpus still share the same ctl bank */
809 static __cpuinit int mce_create_device(unsigned int cpu)
810 {
811 int err;
812 int i;
813
814 if (!mce_available(&boot_cpu_data))
815 return -EIO;
816
817 memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
818 per_cpu(device_mce,cpu).id = cpu;
819 per_cpu(device_mce,cpu).cls = &mce_sysclass;
820
821 err = sysdev_register(&per_cpu(device_mce,cpu));
822 if (err)
823 return err;
824
825 for (i = 0; mce_attributes[i]; i++) {
826 err = sysdev_create_file(&per_cpu(device_mce,cpu),
827 mce_attributes[i]);
828 if (err)
829 goto error;
830 }
831 cpu_set(cpu, mce_device_initialized);
832
833 return 0;
834 error:
835 while (i--) {
836 sysdev_remove_file(&per_cpu(device_mce,cpu),
837 mce_attributes[i]);
838 }
839 sysdev_unregister(&per_cpu(device_mce,cpu));
840
841 return err;
842 }
843
844 static void mce_remove_device(unsigned int cpu)
845 {
846 int i;
847
848 if (!cpu_isset(cpu, mce_device_initialized))
849 return;
850
851 for (i = 0; mce_attributes[i]; i++)
852 sysdev_remove_file(&per_cpu(device_mce,cpu),
853 mce_attributes[i]);
854 sysdev_unregister(&per_cpu(device_mce,cpu));
855 cpu_clear(cpu, mce_device_initialized);
856 }
857
858 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
859 static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
860 unsigned long action, void *hcpu)
861 {
862 unsigned int cpu = (unsigned long)hcpu;
863
864 switch (action) {
865 case CPU_ONLINE:
866 case CPU_ONLINE_FROZEN:
867 mce_create_device(cpu);
868 break;
869 case CPU_DEAD:
870 case CPU_DEAD_FROZEN:
871 mce_remove_device(cpu);
872 break;
873 }
874 return NOTIFY_OK;
875 }
876
877 static struct notifier_block mce_cpu_notifier __cpuinitdata = {
878 .notifier_call = mce_cpu_callback,
879 };
880
881 static __init int mce_init_device(void)
882 {
883 int err;
884 int i = 0;
885
886 if (!mce_available(&boot_cpu_data))
887 return -EIO;
888 err = sysdev_class_register(&mce_sysclass);
889 if (err)
890 return err;
891
892 for_each_online_cpu(i) {
893 err = mce_create_device(i);
894 if (err)
895 return err;
896 }
897
898 register_hotcpu_notifier(&mce_cpu_notifier);
899 misc_register(&mce_log_device);
900 return err;
901 }
902
903 device_initcall(mce_init_device);