#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>

static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;

		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

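/*
 * TSC access control for the PR_SET_TSC/PR_GET_TSC prctl interface:
 * setting CR4.TSD makes RDTSC a privileged instruction, so a task
 * running with TIF_NOTSC takes a #GP (delivered as SIGSEGV) when it
 * reads the TSC from user mode.
 */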
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

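/*
 * Illustrative userspace usage (not part of this file):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	-- make RDTSC fault
 *	prctl(PR_GET_TSC, &tsc_mode);		-- query the current mode
 */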
int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

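/*
 * __switch_to_xtra() reconciles the rarely-switched per-task hardware
 * state when prev_p and next_p differ: the BTF (block-step) bit in the
 * DEBUGCTL MSR, CR4.TSD for RDTSC trapping, the I/O permission bitmap
 * in the TSS, and pending user-return notifiers.
 */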
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the argument.
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

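/*
 * pm_idle is the fallback idle routine the idle loop calls when cpuidle
 * declines; select_idle_routine() picks it during CPU setup and the
 * "idle=" boot parameter (idle_setup() below) can override the choice.
 */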
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

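/*
 * enter_idle()/__exit_idle() bracket the per-CPU idle period: they flip
 * the per-cpu is_idle flag and run the idle_notifier chain registered at
 * the top of this file with IDLE_START/IDLE_END events.
 */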
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}

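/*
 * Park the calling CPU: mark it offline, shut down its local APIC and
 * spin in HLT forever. Used on the stop/reboot paths.
 */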
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

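/*
 * MONITOR arms the hardware to watch the current thread's flags word, so
 * a remote CPU setting TIF_NEED_RESCHED brings this CPU out of MWAIT
 * without having to wait for the reschedule IPI to be delivered.
 */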
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power savings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

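/*
 * mwait_usable() decides whether MWAIT should back the default C1 idle:
 * always when forced with idle=mwait, otherwise only if CPUID leaf 5
 * exists and either lacks extended info or advertises C1 MWAIT
 * sub-states in EDX.
 */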
int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

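/*
 * Called during CPU identification; the first CPU to get here (while
 * pm_idle is still unset) decides the idle routine for the whole system:
 * mwait_idle, the AMD E400 workaround, or plain default_idle.
 */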
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs supports mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

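/*
 * Early "idle=" command-line handling: poll, mwait, halt and nomwait each
 * adjust pm_idle and/or boot_option_idle_override as described in the
 * comments below.
 */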
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

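/*
 * Pick a randomized brk anywhere in the 32MB (0x02000000 byte) window
 * above the unrandomized brk, falling back to the original value if
 * randomize_range() yields zero.
 */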
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}