/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
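        /*
         * thread.sp points at the frame left behind by the 32-bit
         * switch_to() when this thread last scheduled out; the [3]
         * offset assumes the saved EIP sits three words up in that
         * frame, which is a historical layout assumption.
         */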
        return ((unsigned long *)tsk->thread.sp)[3];
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. CPU0 already has it initialized but no harm in
         * doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();

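        /*
         * TS_POLLING tells the scheduler that this CPU polls
         * need_resched() in its idle loop, so no resched IPI is needed
         * to kick it out of idle.
         */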
        current_thread_info()->status |= TS_POLLING;

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter_norcu();
                while (!need_resched()) {

                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(cpu))
                                play_dead();

                        local_touch_nmi();
                        local_irq_disable();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
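                        /*
                         * cpuidle_idle_call() returns nonzero when no
                         * cpuidle driver/device is available; fall back
                         * to the legacy pm_idle handler in that case.
                         */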
                        if (cpuidle_idle_call())
                                pm_idle();
                        start_critical_timings();
                }
                tick_nohz_idle_exit_norcu();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;

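        /*
         * user_mode_vm() also counts VM86 mode as user mode. For a
         * kernel-mode frame the 32-bit pt_regs has no saved sp/ss, so
         * the stack pointer is derived from the frame itself and
         * %ss/%gs are read from the live registers.
         */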
        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                gs = get_user_gs(regs);
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        show_regs_common();

        printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        (u16)regs->cs, regs->ip, regs->flags,
                        smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
        printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->si, regs->di, regs->bp, sp);
        printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                        d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
                        d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
        unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct task_struct *tsk;
        int err;

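        /*
         * The child starts from a copy of the parent's user-mode
         * registers, with %eax forced to 0 (so fork() returns 0 in the
         * child) and the stack pointer replaced by the one the caller
         * passed in for clone().
         */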
        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->ax = 0;
        childregs->sp = sp;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);

        p->thread.ip = (unsigned long) ret_from_fork;

        task_user_gs(p) = get_user_gs(regs);

        p->thread.io_bitmap_ptr = NULL;
        tsk = current;
        err = -ENOMEM;

        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

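        /*
         * If the parent has an IO permission bitmap, the child gets its
         * own private copy; the bitmap is per-thread state and must not
         * be shared.
         */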
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)childregs->si, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

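/*
 * Set up the user-mode register state for a freshly exec'd task:
 * flat user code/data segments plus the new instruction and stack
 * pointers.
 */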
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        set_user_gs(regs, 0);
        regs->fs = 0;
        regs->ds = __USER_DS;
        regs->es = __USER_DS;
        regs->ss = __USER_DS;
        regs->cs = __USER_CS;
        regs->ip = new_ip;
        regs->sp = new_sp;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is largely a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        bool preload_fpu;

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        /*
         * If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
         */
        preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

        __unlazy_fpu(prev_p);

        /* we're going to use this soon, after a few expensive things */
        if (preload_fpu)
                prefetch(next->fpu.state);

        /*
         * Reload esp0.
         */
        load_sp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry. No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel. Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs. This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        lazy_save_gs(prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed. In normal use, the flags restore
         * in the switch assembly will handle this. But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * If we're going to preload the fpu context, make sure clts
         * is run while we're batching the cpu state updates.
         */
        if (preload_fpu)
                clts();

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        if (preload_fpu)
                __math_state_restore();

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);

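        /*
         * Switch this CPU's notion of 'current' over to the incoming
         * task before we return into its context.
         */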
        percpu_write(current_task, next_p);

        return prev_p;
}

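/* Upper bounds for the saved sp/bp sanity checks in get_wchan() below. */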
#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long bp, sp, ip;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        sp = p->thread.sp;
        if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes bp last. */
        bp = *(unsigned long *) sp;
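        /*
         * Walk the frame-pointer chain: on x86-32 each frame stores the
         * caller's %ebp at *bp and the return address at bp+4. The
         * first return address outside the scheduler is where the task
         * is blocked.
         */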
        do {
                if (bp < stack_page || bp > top_ebp+stack_page)
                        return 0;
                ip = *(unsigned long *) (bp+4);
                if (!in_sched_functions(ip))
                        return ip;
                bp = *(unsigned long *) bp;
        } while (count++ < 16);
        return 0;
}