#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
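
/*
 * Note: code that wants to run when a CPU enters or leaves the idle loop can
 * hook this notifier chain. A minimal sketch (the callback and variable names
 * below are only illustrative):
 *
 *	static int my_idle_cb(struct notifier_block *nb, unsigned long action,
 *			      void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = { .notifier_call = my_idle_cb };
 *
 *	idle_notifier_register(&my_idle_nb);
 *
 * The chain is invoked with IDLE_START from enter_idle() and with IDLE_END
 * from __exit_idle() further down in this file.
 */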

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;

		fpu_copy(dst, src);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
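
/*
 * Note: the "task_xstate" cache backs the per-task FPU/extended register save
 * area (xstate_size bytes per object); fpu_alloc(), as used in
 * arch_dup_task_struct() above, draws its allocations from this cache.
 */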

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}
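
/*
 * Note: t->io_bitmap_ptr is only non-NULL for tasks that were granted I/O
 * port access (e.g. via ioperm()); on exit the per-CPU TSS copy is reset to
 * all ones (all ports denied) before the kernel buffer is freed.
 */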

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	drop_init_fpu(tsk);
	/*
	 * Free the FPU state for non xsave platforms. They get reallocated
	 * lazily at the first use.
	 */
	if (!use_eager_fpu())
		free_thread_xstate(tsk);
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = 0;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
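
/*
 * Note: get_tsc_mode()/set_tsc_mode() sit behind the PR_GET_TSC/PR_SET_TSC
 * prctl() operations, which let a process request that RDTSC raise SIGSEGV
 * instead of returning the time stamp counter. A rough userspace sketch
 * (not part of this file):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	// trap on RDTSC
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);	// allow RDTSC again
 */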

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
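
/*
 * Note: __switch_to_xtra() is only reached from the context-switch path when
 * prev or next carries one of the "slow path" TIF flags handled above
 * (TIF_BLOCKSTEP, TIF_NOTSC, TIF_IO_BITMAP, user-return notifiers); the
 * common case bypasses it entirely.
 */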

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
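
/*
 * Note: safe_halt() re-enables interrupts and halts in one "sti; hlt"
 * sequence, so a wakeup interrupt arriving in between cannot be missed.
 */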

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}
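
/*
 * Note: once C1E has been detected, the local APIC timer can stop while the
 * CPU sleeps, so the CPU is moved to the broadcast clockevent device before
 * halting and moved back, with interrupts still disabled, on wakeup.
 */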

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
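
/*
 * Note: idle_setup() parses the "idle=" kernel command line parameter, e.g.
 * booting with "idle=poll", "idle=halt" or "idle=nomwait" selects the
 * corresponding behaviour above.
 */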

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
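
/*
 * Note: arch_randomize_brk() picks a heap start within the 32 MB window
 * above mm->brk (0x02000000 bytes); if randomize_range() fails it falls
 * back to the unrandomized mm->brk.
 */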