#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/ds.h>
#include <asm/debugreg.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}

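/*
 * Note: arch_dup_task_struct() runs at fork() time. The child only gets
 * its own extended-state buffer when the parent actually has one
 * allocated, so tasks that never touch the FPU stay cheap to duplicate.
 */
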
void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
        WARN(tsk->thread.ds_ctx, "leaking DS context\n");
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

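/*
 * Example (illustrative, not from this file): a task only carries an
 * io_bitmap_ptr after userspace requested port access, e.g.
 *
 *      ioperm(0x378, 3, 1);    - grant access to ports 0x378..0x37a
 *
 * exit_thread() above revokes such a grant in both the task and the TSS.
 */
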
void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs),
                   regs->bp);
}

void show_regs_common(void)
{
        const char *board, *product;

        board = dmi_get_system_info(DMI_BOARD_NAME);
        if (!board)
                board = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version, board, product);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val = PR_TSC_ENABLE;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

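/*
 * Usage sketch (hypothetical userspace caller): these helpers back the
 * PR_GET_TSC/PR_SET_TSC prctl(2) commands, e.g.
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      - rdtsc now raises SIGSEGV
 *      prctl(PR_GET_TSC, &tsc_mode);           - read the current mode back
 */
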
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

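/*
 * Usage sketch (hypothetical caller; my_worker and my_data are
 * illustrative names, not part of this file):
 *
 *      static int my_worker(void *data)
 *      {
 *              do_work(data);
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(my_worker, my_data, CLONE_FS | CLONE_FILES);
 *
 * The fake pt_regs built above makes the child start in
 * kernel_thread_helper() with %si = fn and %di = arg.
 */
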
/*
 * sys_execve() executes a new program.
 */
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

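/*
 * Usage sketch, following the comment above (new_idle_fn is an
 * illustrative name):
 *
 *      pm_idle = new_idle_fn;
 *      cpu_idle_wait();        - no CPU uses the old handler after this
 */
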
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        trace_power_start(POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}

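/*
 * Note: in the MWAIT hint register ax, bits 7:4 select the target
 * C-state and bits 3:0 a sub-state, which is why the trace call above
 * reports (ax>>4)+1 as the C-state number.
 */
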
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(0);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check, whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check, whether
         * C1 supports MWAIT
         */
        return (edx & MWAIT_EDX_C1);
}

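/*
 * Note: CPUID leaf 5 (MWAIT_INFO) reports the number of MWAIT sub-states
 * per C-state in EDX, four bits per C-state; MWAIT_EDX_C1 (0xf0) thus
 * tests whether C1 advertises any MWAIT sub-state at all.
 */
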
/*
 * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
 * For more information see
 * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
 * - Erratum #365 for family 0x11 (not affected because C1e not in use)
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        u64 val;

        if (c->x86_vendor != X86_VENDOR_AMD)
                goto no_c1e_idle;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0F && c->x86_model >= 0x40)
                return 1;

        if (c->x86 == 0x10) {
                /*
                 * check OSVW bit for CPUs that are not affected
                 * by erratum #400
                 */
                if (cpu_has(c, X86_FEATURE_OSVW)) {
                        rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
                        if (val >= 2) {
                                rdmsrl(MSR_AMD64_OSVW_STATUS, val);
                                if (!(val & BIT(1)))
                                        goto no_c1e_idle;
                        }
                }
                return 1;
        }

no_c1e_idle:
        return 0;
}

static cpumask_var_t c1e_mask;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

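/*
 * Note: the resulting priority order is mwait_idle (when MWAIT is
 * present and usable), then c1e_idle on affected AMD parts, then
 * default_idle (plain hlt) as the fallback.
 */
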
void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle)
                zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);

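/*
 * Usage: idle_setup() parses the "idle=" kernel command line option,
 * e.g. booting with
 *
 *      idle=poll | idle=mwait | idle=halt | idle=nomwait
 *
 * selects the corresponding behaviour in the branches above.
 */
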
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

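/*
 * Note: with randomization enabled, the stack top is shifted down by a
 * random 0..8191 bytes and then rounded down to 16-byte alignment by
 * the mask above.
 */
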
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
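
/*
 * Note: 0x02000000 is 32 MiB, so the heap start is randomized within
 * [mm->brk, mm->brk + 32 MiB), falling back to mm->brk itself when
 * randomize_range() fails and returns 0.
 */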