#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
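
/*
 * Illustrative usage sketch (not part of this file): code that wants to hook
 * idle transitions registers a notifier_block whose callback is passed
 * IDLE_START/IDLE_END as the event value. The names my_idle_notify and
 * my_idle_nb below are hypothetical.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long event, void *unused)
 *	{
 *		if (event == IDLE_START)
 *			;	// this CPU is about to go idle
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *	...
 *	idle_notifier_register(&my_idle_nb);
 */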
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
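
/*
 * Called at fork time: duplicate the parent's FPU/extended state into the
 * child, so the lazily allocated xstate buffer is not shared between the
 * two tasks.
 */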
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;

		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}
void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}
void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}
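
/*
 * CR4.TSD control: once the TSD bit is set, RDTSC from user mode faults.
 * The helpers below flip that bit together with the TIF_NOTSC thread flag
 * to implement the PR_GET_TSC/PR_SET_TSC prctl() interface.
 */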
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/* Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context. */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/* Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context. */
		hard_enable_TSC();
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}
long sys_clone(unsigned long clone_flags, unsigned long newsp,
	       void __user *parent_tid, void __user *child_tid,
	       struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the argument.
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
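
/*
 * Illustrative only: a caller would typically spawn a kernel thread with
 * something like the sketch below; my_thread_fn and do_some_work are
 * hypothetical names, and the clone flags are just an example.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		do_some_work(data);
 *		return 0;
 *	}
 *	...
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */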
/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif
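
/* pm_idle is filled in by select_idle_routine() below, or by the "idle=" boot option. */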
#ifdef CONFIG_X86_32
/* This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove. */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();
			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	if (hlt_works(smp_processor_id()))
		for (;;)
			halt();
	for (;;);
}

static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
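
/*
 * Illustrative only: code switching the idle handler would do roughly
 *
 *	pm_idle = my_new_idle;		// hypothetical replacement handler
 *	cpu_idle_wait();
 *
 * so that no CPU is still executing the old function afterwards.
 */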
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0
int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check, whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check, whether
	 * the C1 state is among the enumerated extensions.
	 */
	return (edx & MWAIT_EDX_C1);
}
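
/* State shared by the AMD erratum 400 (C1E) aware idle routine below. */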
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}
/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1)
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs supports mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}
void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
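
/*
 * Parse the "idle=" kernel command line option (e.g. "idle=halt" or
 * "idle=poll") and adjust pm_idle / boot_option_idle_override accordingly.
 */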
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
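
/*
 * Stack and brk randomization hooks used when a new process image is set up:
 * arch_align_stack() jitters the initial user stack pointer within 8 KB and
 * arch_randomize_brk() picks a randomized start for the heap.
 */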
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}