/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
17 #include <linux/cpu.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h>
21 #include <linux/kernel.h>
23 #include <linux/elfcore.h>
24 #include <linux/smp.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/module.h>
30 #include <linux/ptrace.h>
31 #include <linux/notifier.h>
32 #include <linux/kprobes.h>
33 #include <linux/kdebug.h>
34 #include <linux/prctl.h>
35 #include <linux/uaccess.h>
37 #include <linux/ftrace.h>
39 #include <asm/pgtable.h>
40 #include <asm/processor.h>
42 #include <asm/fpu-internal.h>
43 #include <asm/mmu_context.h>
44 #include <asm/prctl.h>
46 #include <asm/proto.h>
49 #include <asm/syscalls.h>
50 #include <asm/debugreg.h>
51 #include <asm/switch_to.h>
/* Entry point newly-forked tasks return through; defined in assembly
 * (presumably entry_64.S — TODO confirm), hence asmlinkage/extern. */
asmlinkage
extern void ret_from_fork(void);

/* Per-CPU stash of the user-mode stack pointer; written in
 * start_thread_common() and swapped in __switch_to() below. */
DEFINE_PER_CPU(unsigned long, old_rsp
);
57 /* Prints also some state that isn't saved in the pt_regs */
58 void __show_regs(struct pt_regs
*regs
, int all
)
60 unsigned long cr0
= 0L, cr2
= 0L, cr3
= 0L, cr4
= 0L, fs
, gs
, shadowgs
;
61 unsigned long d0
, d1
, d2
, d3
, d6
, d7
;
62 unsigned int fsindex
, gsindex
;
63 unsigned int ds
, cs
, es
;
66 printk(KERN_DEFAULT
"RIP: %04lx:[<%016lx>] ", regs
->cs
& 0xffff, regs
->ip
);
67 printk_address(regs
->ip
, 1);
68 printk(KERN_DEFAULT
"RSP: %04lx:%016lx EFLAGS: %08lx\n", regs
->ss
,
69 regs
->sp
, regs
->flags
);
70 printk(KERN_DEFAULT
"RAX: %016lx RBX: %016lx RCX: %016lx\n",
71 regs
->ax
, regs
->bx
, regs
->cx
);
72 printk(KERN_DEFAULT
"RDX: %016lx RSI: %016lx RDI: %016lx\n",
73 regs
->dx
, regs
->si
, regs
->di
);
74 printk(KERN_DEFAULT
"RBP: %016lx R08: %016lx R09: %016lx\n",
75 regs
->bp
, regs
->r8
, regs
->r9
);
76 printk(KERN_DEFAULT
"R10: %016lx R11: %016lx R12: %016lx\n",
77 regs
->r10
, regs
->r11
, regs
->r12
);
78 printk(KERN_DEFAULT
"R13: %016lx R14: %016lx R15: %016lx\n",
79 regs
->r13
, regs
->r14
, regs
->r15
);
81 asm("movl %%ds,%0" : "=r" (ds
));
82 asm("movl %%cs,%0" : "=r" (cs
));
83 asm("movl %%es,%0" : "=r" (es
));
84 asm("movl %%fs,%0" : "=r" (fsindex
));
85 asm("movl %%gs,%0" : "=r" (gsindex
));
87 rdmsrl(MSR_FS_BASE
, fs
);
88 rdmsrl(MSR_GS_BASE
, gs
);
89 rdmsrl(MSR_KERNEL_GS_BASE
, shadowgs
);
99 printk(KERN_DEFAULT
"FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
100 fs
, fsindex
, gs
, gsindex
, shadowgs
);
101 printk(KERN_DEFAULT
"CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs
, ds
,
103 printk(KERN_DEFAULT
"CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2
, cr3
,
109 printk(KERN_DEFAULT
"DR0: %016lx DR1: %016lx DR2: %016lx\n", d0
, d1
, d2
);
113 printk(KERN_DEFAULT
"DR3: %016lx DR6: %016lx DR7: %016lx\n", d3
, d6
, d7
);
116 void release_thread(struct task_struct
*dead_task
)
119 if (dead_task
->mm
->context
.size
) {
120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
122 dead_task
->mm
->context
.ldt
,
123 dead_task
->mm
->context
.size
);
129 static inline void set_32bit_tls(struct task_struct
*t
, int tls
, u32 addr
)
131 struct user_desc ud
= {
138 struct desc_struct
*desc
= t
->thread
.tls_array
;
143 static inline u32
read_32bit_tls(struct task_struct
*t
, int tls
)
145 return get_desc_base(&t
->thread
.tls_array
[tls
]);
148 int copy_thread(unsigned long clone_flags
, unsigned long sp
,
150 struct task_struct
*p
, struct pt_regs
*regs
)
153 struct pt_regs
*childregs
;
154 struct task_struct
*me
= current
;
156 p
->thread
.sp0
= (unsigned long)task_stack_page(p
) + THREAD_SIZE
;
157 childregs
= task_pt_regs(p
);
158 p
->thread
.sp
= (unsigned long) childregs
;
159 p
->thread
.usersp
= me
->thread
.usersp
;
160 set_tsk_thread_flag(p
, TIF_FORK
);
162 p
->thread
.io_bitmap_ptr
= NULL
;
164 savesegment(gs
, p
->thread
.gsindex
);
165 p
->thread
.gs
= p
->thread
.gsindex
? 0 : me
->thread
.gs
;
166 savesegment(fs
, p
->thread
.fsindex
);
167 p
->thread
.fs
= p
->thread
.fsindex
? 0 : me
->thread
.fs
;
168 savesegment(es
, p
->thread
.es
);
169 savesegment(ds
, p
->thread
.ds
);
170 memset(p
->thread
.ptrace_bps
, 0, sizeof(p
->thread
.ptrace_bps
));
172 if (unlikely(p
->flags
& PF_KTHREAD
)) {
174 memset(childregs
, 0, sizeof(struct pt_regs
));
175 childregs
->sp
= (unsigned long)childregs
;
176 childregs
->ss
= __KERNEL_DS
;
177 childregs
->bx
= sp
; /* function */
179 childregs
->orig_ax
= -1;
180 childregs
->cs
= __KERNEL_CS
| get_kernel_rpl();
181 childregs
->flags
= X86_EFLAGS_IF
| X86_EFLAGS_BIT1
;
184 *childregs
= *current_pt_regs();
191 memset(p
->thread
.ptrace_bps
, 0, sizeof(p
->thread
.ptrace_bps
));
193 if (unlikely(test_tsk_thread_flag(me
, TIF_IO_BITMAP
))) {
194 p
->thread
.io_bitmap_ptr
= kmemdup(me
->thread
.io_bitmap_ptr
,
195 IO_BITMAP_BYTES
, GFP_KERNEL
);
196 if (!p
->thread
.io_bitmap_ptr
) {
197 p
->thread
.io_bitmap_max
= 0;
200 set_tsk_thread_flag(p
, TIF_IO_BITMAP
);
204 * Set a new TLS for the child thread?
206 if (clone_flags
& CLONE_SETTLS
) {
207 #ifdef CONFIG_IA32_EMULATION
208 if (test_thread_flag(TIF_IA32
))
209 err
= do_set_thread_area(p
, -1,
210 (struct user_desc __user
*)childregs
->si
, 0);
213 err
= do_arch_prctl(p
, ARCH_SET_FS
, childregs
->r8
);
219 if (err
&& p
->thread
.io_bitmap_ptr
) {
220 kfree(p
->thread
.io_bitmap_ptr
);
221 p
->thread
.io_bitmap_max
= 0;
228 start_thread_common(struct pt_regs
*regs
, unsigned long new_ip
,
229 unsigned long new_sp
,
230 unsigned int _cs
, unsigned int _ss
, unsigned int _ds
)
233 loadsegment(es
, _ds
);
234 loadsegment(ds
, _ds
);
236 current
->thread
.usersp
= new_sp
;
239 this_cpu_write(old_rsp
, new_sp
);
242 regs
->flags
= X86_EFLAGS_IF
;
246 start_thread(struct pt_regs
*regs
, unsigned long new_ip
, unsigned long new_sp
)
248 start_thread_common(regs
, new_ip
, new_sp
,
249 __USER_CS
, __USER_DS
, 0);
#ifdef CONFIG_IA32_EMULATION
/* Start a compat (ia32/x32) user thread; x32 keeps the 64-bit CS. */
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
263 * switch_to(x,y) should switch tasks from x to y.
265 * This could still be optimized:
266 * - fold all the options into a flag word and test it with a single test.
267 * - could test fs/gs bitsliced
269 * Kprobes not supported here. Set the probe on schedule instead.
270 * Function graph tracer not supported too.
272 __notrace_funcgraph
struct task_struct
*
273 __switch_to(struct task_struct
*prev_p
, struct task_struct
*next_p
)
275 struct thread_struct
*prev
= &prev_p
->thread
;
276 struct thread_struct
*next
= &next_p
->thread
;
277 int cpu
= smp_processor_id();
278 struct tss_struct
*tss
= &per_cpu(init_tss
, cpu
);
279 unsigned fsindex
, gsindex
;
282 fpu
= switch_fpu_prepare(prev_p
, next_p
, cpu
);
285 * Reload esp0, LDT and the page table pointer:
291 * This won't pick up thread selector changes, but I guess that is ok.
293 savesegment(es
, prev
->es
);
294 if (unlikely(next
->es
| prev
->es
))
295 loadsegment(es
, next
->es
);
297 savesegment(ds
, prev
->ds
);
298 if (unlikely(next
->ds
| prev
->ds
))
299 loadsegment(ds
, next
->ds
);
302 /* We must save %fs and %gs before load_TLS() because
303 * %fs and %gs may be cleared by load_TLS().
305 * (e.g. xen_load_tls())
307 savesegment(fs
, fsindex
);
308 savesegment(gs
, gsindex
);
313 * Leave lazy mode, flushing any hypercalls made here.
314 * This must be done before restoring TLS segments so
315 * the GDT and LDT are properly updated, and must be
316 * done before math_state_restore, so the TS bit is up
319 arch_end_context_switch(next_p
);
324 * Segment register != 0 always requires a reload. Also
325 * reload when it has changed. When prev process used 64bit
326 * base always reload to avoid an information leak.
328 if (unlikely(fsindex
| next
->fsindex
| prev
->fs
)) {
329 loadsegment(fs
, next
->fsindex
);
331 * Check if the user used a selector != 0; if yes
332 * clear 64bit base, since overloaded base is always
333 * mapped to the Null selector
338 /* when next process has a 64bit base use it */
340 wrmsrl(MSR_FS_BASE
, next
->fs
);
341 prev
->fsindex
= fsindex
;
343 if (unlikely(gsindex
| next
->gsindex
| prev
->gs
)) {
344 load_gs_index(next
->gsindex
);
349 wrmsrl(MSR_KERNEL_GS_BASE
, next
->gs
);
350 prev
->gsindex
= gsindex
;
352 switch_fpu_finish(next_p
, fpu
);
355 * Switch the PDA and FPU contexts.
357 prev
->usersp
= this_cpu_read(old_rsp
);
358 this_cpu_write(old_rsp
, next
->usersp
);
359 this_cpu_write(current_task
, next_p
);
361 this_cpu_write(kernel_stack
,
362 (unsigned long)task_stack_page(next_p
) +
363 THREAD_SIZE
- KERNEL_STACK_OFFSET
);
366 * Now maybe reload the debug registers and handle I/O bitmaps
368 if (unlikely(task_thread_info(next_p
)->flags
& _TIF_WORK_CTXSW_NEXT
||
369 task_thread_info(prev_p
)->flags
& _TIF_WORK_CTXSW_PREV
))
370 __switch_to_xtra(prev_p
, next_p
, tss
);
375 void set_personality_64bit(void)
377 /* inherit personality from parent */
379 /* Make sure to be in 64bit mode */
380 clear_thread_flag(TIF_IA32
);
381 clear_thread_flag(TIF_ADDR32
);
382 clear_thread_flag(TIF_X32
);
384 /* Ensure the corresponding mm is not marked. */
386 current
->mm
->context
.ia32_compat
= 0;
388 /* TBD: overwrites user setup. Should have two bits.
389 But 64bit processes have always behaved this way,
390 so it's not too bad. The main problem is just that
391 32bit childs are affected again. */
392 current
->personality
&= ~READ_IMPLIES_EXEC
;
395 void set_personality_ia32(bool x32
)
397 /* inherit personality from parent */
399 /* Make sure to be in 32bit mode */
400 set_thread_flag(TIF_ADDR32
);
402 /* Mark the associated mm as containing 32-bit tasks. */
404 current
->mm
->context
.ia32_compat
= 1;
407 clear_thread_flag(TIF_IA32
);
408 set_thread_flag(TIF_X32
);
409 current
->personality
&= ~READ_IMPLIES_EXEC
;
410 /* is_compat_task() uses the presence of the x32
411 syscall bit flag to determine compat status */
412 current_thread_info()->status
&= ~TS_COMPAT
;
414 set_thread_flag(TIF_IA32
);
415 clear_thread_flag(TIF_X32
);
416 current
->personality
|= force_personality32
;
417 /* Prepare the first "return" to user space */
418 current_thread_info()->status
|= TS_COMPAT
;
421 EXPORT_SYMBOL_GPL(set_personality_ia32
);
423 unsigned long get_wchan(struct task_struct
*p
)
429 if (!p
|| p
== current
|| p
->state
== TASK_RUNNING
)
431 stack
= (unsigned long)task_stack_page(p
);
432 if (p
->thread
.sp
< stack
|| p
->thread
.sp
>= stack
+THREAD_SIZE
)
434 fp
= *(u64
*)(p
->thread
.sp
);
436 if (fp
< (unsigned long)stack
||
437 fp
>= (unsigned long)stack
+THREAD_SIZE
)
440 if (!in_sched_functions(ip
))
443 } while (count
++ < 16);
447 long do_arch_prctl(struct task_struct
*task
, int code
, unsigned long addr
)
450 int doit
= task
== current
;
455 if (addr
>= TASK_SIZE_OF(task
))
458 /* handle small bases via the GDT because that's faster to
460 if (addr
<= 0xffffffff) {
461 set_32bit_tls(task
, GS_TLS
, addr
);
463 load_TLS(&task
->thread
, cpu
);
464 load_gs_index(GS_TLS_SEL
);
466 task
->thread
.gsindex
= GS_TLS_SEL
;
469 task
->thread
.gsindex
= 0;
470 task
->thread
.gs
= addr
;
473 ret
= wrmsrl_safe(MSR_KERNEL_GS_BASE
, addr
);
479 /* Not strictly needed for fs, but do it for symmetry
481 if (addr
>= TASK_SIZE_OF(task
))
484 /* handle small bases via the GDT because that's faster to
486 if (addr
<= 0xffffffff) {
487 set_32bit_tls(task
, FS_TLS
, addr
);
489 load_TLS(&task
->thread
, cpu
);
490 loadsegment(fs
, FS_TLS_SEL
);
492 task
->thread
.fsindex
= FS_TLS_SEL
;
495 task
->thread
.fsindex
= 0;
496 task
->thread
.fs
= addr
;
498 /* set the selector to 0 to not confuse
501 ret
= wrmsrl_safe(MSR_FS_BASE
, addr
);
508 if (task
->thread
.fsindex
== FS_TLS_SEL
)
509 base
= read_32bit_tls(task
, FS_TLS
);
511 rdmsrl(MSR_FS_BASE
, base
);
513 base
= task
->thread
.fs
;
514 ret
= put_user(base
, (unsigned long __user
*)addr
);
520 if (task
->thread
.gsindex
== GS_TLS_SEL
)
521 base
= read_32bit_tls(task
, GS_TLS
);
523 savesegment(gs
, gsindex
);
525 rdmsrl(MSR_KERNEL_GS_BASE
, base
);
527 base
= task
->thread
.gs
;
529 base
= task
->thread
.gs
;
530 ret
= put_user(base
, (unsigned long __user
*)addr
);
542 long sys_arch_prctl(int code
, unsigned long addr
)
544 return do_arch_prctl(current
, code
, addr
);
547 unsigned long KSTK_ESP(struct task_struct
*task
)
549 return (test_tsk_thread_flag(task
, TIF_IA32
)) ?
550 (task_pt_regs(task
)->sp
) : ((task
)->thread
.usersp
);
/* (web-viewer footer from the scraped source, kept as a comment:
 * "This page took 0.042855 seconds and 6 git commands to generate.") */