x86: Remove the ancient and deprecated disable_hlt() and enable_hlt() facility
[deliverable/linux.git] arch/x86/kernel/process.c
index 15763af7bfe3b6202af28810c3aafbeaf14d2a0c..1d92a5ab6e8b939613d6bfd7221133df2496b482 100644
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
-#include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+       atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
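
The idle notifiers added above are consumed through the generic notifier_block API. As a rough sketch (the callback name and its registration site are illustrative, not part of this patch), an x86-64 in-kernel user would look something like:

	#include <linux/notifier.h>
	#include <asm/idle.h>	/* idle_notifier_register(), IDLE_START, IDLE_END */

	/* Hypothetical callback: runs atomically on every idle entry/exit. */
	static int my_idle_notify(struct notifier_block *nb, unsigned long action,
				  void *data)
	{
		if (action == IDLE_START) {
			/* this CPU is about to enter its idle routine */
		} else if (action == IDLE_END) {
			/* this CPU left idle, typically because an interrupt arrived */
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_idle_nb = {
		.notifier_call = my_idle_notify,
	};

	/* registered once, e.g. from a driver init path: */
	idle_notifier_register(&my_idle_nb);
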
@@ -341,35 +362,104 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 #endif
 
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
+static inline int hlt_use_halt(void)
 {
-       hlt_counter++;
+       return 1;
 }
-EXPORT_SYMBOL(disable_hlt);
 
-void enable_hlt(void)
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
 {
-       hlt_counter--;
+       BUG();
 }
-EXPORT_SYMBOL(enable_hlt);
+#endif
 
-static inline int hlt_use_halt(void)
+#ifdef CONFIG_X86_64
+void enter_idle(void)
 {
-       return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+       percpu_write(is_idle, 1);
+       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
-#else
-static inline int hlt_use_halt(void)
+
+static void __exit_idle(void)
 {
-       return 1;
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+               return;
+       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+       /* idle loop has pid 0 */
+       if (current->pid)
+               return;
+       __exit_idle();
 }
 #endif
 
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we wont ever return from this function (so the invalid
+        * canaries already on the stack wont ever trigger).
+        */
+       boot_init_stack_canary();
+       current_thread_info()->status |= TS_POLLING;
+
+       while (1) {
+               tick_nohz_idle_enter();
+
+               while (!need_resched()) {
+                       rmb();
+
+                       if (cpu_is_offline(smp_processor_id()))
+                               play_dead();
+
+                       /*
+                        * Idle routines should keep interrupts disabled
+                        * from here on, until they go to idle.
+                        * Otherwise, idle callbacks can misfire.
+                        */
+                       local_touch_nmi();
+                       local_irq_disable();
+
+                       enter_idle();
+
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
+
+                       /* enter_idle() needs rcu for notifiers */
+                       rcu_idle_enter();
+
+                       if (cpuidle_idle_call())
+                               pm_idle();
+
+                       rcu_idle_exit();
+                       start_critical_timings();
+
+                       /* In many cases the interrupt that ended idle
+                          has already called exit_idle. But some idle
+                          loops can be woken up without interrupt. */
+                       __exit_idle();
+               }
+
+               tick_nohz_idle_exit();
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
+       }
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
@@ -377,8 +467,8 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
        if (hlt_use_halt()) {
-               trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-               trace_cpu_idle(1, smp_processor_id());
+               trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
@@ -391,8 +481,8 @@ void default_idle(void)
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
-               trace_power_end(smp_processor_id());
-               trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+               trace_power_end_rcuidle(smp_processor_id());
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
@@ -450,8 +540,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 static void mwait_idle(void)
 {
        if (!need_resched()) {
-               trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-               trace_cpu_idle(1, smp_processor_id());
+               trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
 
@@ -461,8 +551,8 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
-               trace_power_end(smp_processor_id());
-               trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+               trace_power_end_rcuidle(smp_processor_id());
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
 }
@@ -474,13 +564,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-       trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-       trace_cpu_idle(0, smp_processor_id());
+       trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+       trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
-       trace_power_end(smp_processor_id());
-       trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+       trace_power_end_rcuidle(smp_processor_id());
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /*
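
One way to read the trace_*_rcuidle() conversions above: with the consolidated cpu_idle() loop, default_idle(), mwait_idle() and poll_idle() are reached from inside the rcu_idle_enter()/rcu_idle_exit() window, where ordinary tracepoints (which rely on RCU) must not be used, so the _rcuidle tracepoint variants are used instead. A condensed, illustrative view of the ordering the new loop establishes (not literal patch content):

	/* inside the need_resched() loop of cpu_idle() */
	local_irq_disable();
	enter_idle();			/* IDLE_START notifiers; still needs RCU */
	stop_critical_timings();
	rcu_idle_enter();		/* RCU stops watching this CPU */

	if (cpuidle_idle_call())	/* no cpuidle driver/governor handled it? */
		pm_idle();		/* fall back to default_idle()/mwait_idle(),
					   which therefore use trace_*_rcuidle() */

	rcu_idle_exit();		/* RCU is watching again */
	start_critical_timings();
	__exit_idle();			/* IDLE_END notifiers, if not already sent */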