x86, mce: Replace MCE_SELF_VECTOR by irq_work
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ff1ae9b6464d80d4663b8bc09124edbe6992ea7f..e81d48b05618e37246cb62bd594c461d223330e9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
 #include <linux/thread_info.h>
 #include <linux/capability.h>
 #include <linux/miscdevice.h>
-#include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/kallsyms.h>
 #include <linux/rcupdate.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
 #include <linux/edac_mce.h>
+#include <linux/irq_work.h>
 
 #include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/ipi.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -461,22 +457,13 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
                m->ip = mce_rdmsrl(rip_msr);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-/*
- * Called after interrupts have been reenabled again
- * when a MCE happened during an interrupts off region
- * in the kernel.
- */
-asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
+DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+
+static void mce_irq_work_cb(struct irq_work *entry)
 {
-       ack_APIC_irq();
-       exit_idle();
-       irq_enter();
        mce_notify_irq();
        mce_schedule_work();
-       irq_exit();
 }
-#endif
 
 static void mce_report_event(struct pt_regs *regs)
 {
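
The new callback can stay this small because the common x86 irq_work
vector handler performs the APIC acknowledgement and the irq_enter()/
irq_exit() bookkeeping that smp_mce_self_interrupt() open-coded, before
walking the queued entries. Roughly, as a from-memory sketch of the
era's arch/x86/kernel/irq_work.c -- not part of this patch, and possibly
differing in detail:

#include <linux/irq_work.h>
#include <linux/hardirq.h>
#include <asm/apic.h>

void smp_irq_work_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_irq_work_irqs);
	irq_work_run();	/* runs mce_irq_work_cb() among others */
	irq_exit();
}
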
@@ -492,29 +479,7 @@ static void mce_report_event(struct pt_regs *regs)
                return;
        }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-       /*
-        * Without APIC do not notify. The event will be picked
-        * up eventually.
-        */
-       if (!cpu_has_apic)
-               return;
-
-       /*
-        * When interrupts are disabled we cannot use
-        * kernel services safely. Trigger an self interrupt
-        * through the APIC to instead do the notification
-        * after interrupts are reenabled again.
-        */
-       apic->send_IPI_self(MCE_SELF_VECTOR);
-
-       /*
-        * Wait for idle afterwards again so that we don't leave the
-        * APIC in a non idle state because the normal APIC writes
-        * cannot exclude us.
-        */
-       apic_wait_icr_idle();
-#endif
+       irq_work_queue(&__get_cpu_var(mce_irq_work));
 }
 
 DEFINE_PER_CPU(unsigned, mce_poll_count);
@@ -1444,7 +1409,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
        __mcheck_cpu_init_vendor(c);
        __mcheck_cpu_init_timer();
        INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-
+       init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
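
Taken together, the three hunks follow the standard irq_work usage
pattern: define a per-CPU struct irq_work, bind it to a callback with
init_irq_work(), and raise it with irq_work_queue() from the context
where normal kernel services are off limits. The removed cpu_has_apic
check and apic_wait_icr_idle() dance disappear as well, since raising
the vector is now the irq_work layer's concern. A minimal self-contained
sketch of the same pattern, with hypothetical demo_* names that are not
part of this patch:

#include <linux/module.h>
#include <linux/irq_work.h>

static struct irq_work demo_work;

/* Runs later via the irq_work self-interrupt, with the usual kernel
 * services available again. */
static void demo_work_cb(struct irq_work *work)
{
	pr_info("deferred work running\n");
}

static int __init demo_init(void)
{
	init_irq_work(&demo_work, demo_work_cb);

	/* Lockless; safe even from NMI-like contexts such as the
	 * machine-check handler. Queueing an already-pending entry
	 * is a no-op. */
	irq_work_queue(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_work);	/* wait out a pending callback */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");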