x86, um: switch to generic fork/vfork/clone
[deliverable/linux.git] / arch / x86 / kernel / process_64.c
CommitLineData
1da177e4 1/*
1da177e4
LT
2 * Copyright (C) 1995 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * Gareth Hughes <gareth@valinux.com>, May 2000
6612538c 6 *
1da177e4
LT
7 * X86-64 port
8 * Andi Kleen.
76e4f660
AR
9 *
10 * CPU hotplug support - ashok.raj@intel.com
1da177e4
LT
11 */
12
13/*
14 * This file handles the architecture-dependent parts of process handling..
15 */
16
76e4f660 17#include <linux/cpu.h>
1da177e4
LT
18#include <linux/errno.h>
19#include <linux/sched.h>
6612538c 20#include <linux/fs.h>
1da177e4
LT
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/elfcore.h>
24#include <linux/smp.h>
25#include <linux/slab.h>
26#include <linux/user.h>
1da177e4
LT
27#include <linux/interrupt.h>
28#include <linux/delay.h>
6612538c 29#include <linux/module.h>
1da177e4 30#include <linux/ptrace.h>
95833c83 31#include <linux/notifier.h>
c6fd91f0 32#include <linux/kprobes.h>
1eeb66a1 33#include <linux/kdebug.h>
529e25f6 34#include <linux/prctl.h>
7de08b4e
GP
35#include <linux/uaccess.h>
36#include <linux/io.h>
8b96f011 37#include <linux/ftrace.h>
1da177e4 38
1da177e4 39#include <asm/pgtable.h>
1da177e4
LT
40#include <asm/processor.h>
41#include <asm/i387.h>
1361b83a 42#include <asm/fpu-internal.h>
1da177e4 43#include <asm/mmu_context.h>
1da177e4 44#include <asm/prctl.h>
1da177e4
LT
45#include <asm/desc.h>
46#include <asm/proto.h>
47#include <asm/ia32.h>
95833c83 48#include <asm/idle.h>
bbc1f698 49#include <asm/syscalls.h>
66cb5917 50#include <asm/debugreg.h>
f05e798a 51#include <asm/switch_to.h>
1da177e4
LT
52
/* Entry point of a freshly forked task, defined in entry_64.S. */
asmlinkage extern void ret_from_fork(void);

/* Per-CPU slot holding the user %rsp across the syscall fast path. */
DEFINE_PER_CPU(unsigned long, old_rsp);
1da177e4 56
/*
 * Dump the register state in @regs plus live CPU state that is not
 * saved in pt_regs (segment selectors, FS/GS bases, control and debug
 * registers).  When @all is zero only the pt_regs-derived lines are
 * printed; the control/debug register dump is skipped.
 *
 * Prints also some state that isn't saved in the pt_regs.
 */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	/*
	 * Data segment selectors are not saved in pt_regs on 64-bit;
	 * read the live values from the CPU.
	 */
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	/* The 64-bit FS/GS bases live in MSRs, not in the selectors. */
	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
115
1da177e4
LT
116void release_thread(struct task_struct *dead_task)
117{
118 if (dead_task->mm) {
119 if (dead_task->mm->context.size) {
c767a54b
JP
120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
121 dead_task->comm,
122 dead_task->mm->context.ldt,
123 dead_task->mm->context.size);
1da177e4
LT
124 BUG();
125 }
126 }
127}
128
129static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
130{
6612538c 131 struct user_desc ud = {
1da177e4
LT
132 .base_addr = addr,
133 .limit = 0xfffff,
134 .seg_32bit = 1,
135 .limit_in_pages = 1,
136 .useable = 1,
137 };
ade1af77 138 struct desc_struct *desc = t->thread.tls_array;
1da177e4 139 desc += tls;
80fbb69a 140 fill_ldt(desc, &ud);
1da177e4
LT
141}
142
/* Read back the 32-bit base address stored in TLS descriptor slot @tls. */
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
147
6f2c55b8 148int copy_thread(unsigned long clone_flags, unsigned long sp,
7076aada 149 unsigned long arg,
7de08b4e 150 struct task_struct *p, struct pt_regs *regs)
1da177e4
LT
151{
152 int err;
7de08b4e 153 struct pt_regs *childregs;
1da177e4
LT
154 struct task_struct *me = current;
155
7076aada
AV
156 p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
157 childregs = task_pt_regs(p);
faca6227 158 p->thread.sp = (unsigned long) childregs;
faca6227 159 p->thread.usersp = me->thread.usersp;
e4f17c43 160 set_tsk_thread_flag(p, TIF_FORK);
cea20ca3 161 p->fpu_counter = 0;
66cb5917 162 p->thread.io_bitmap_ptr = NULL;
1da177e4 163
ada85708 164 savesegment(gs, p->thread.gsindex);
7ce5a2b9 165 p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
ada85708 166 savesegment(fs, p->thread.fsindex);
7ce5a2b9 167 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
ada85708
JF
168 savesegment(es, p->thread.es);
169 savesegment(ds, p->thread.ds);
7076aada
AV
170 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
171
1d4b4b29 172 if (unlikely(p->flags & PF_KTHREAD)) {
7076aada
AV
173 /* kernel thread */
174 memset(childregs, 0, sizeof(struct pt_regs));
175 childregs->sp = (unsigned long)childregs;
176 childregs->ss = __KERNEL_DS;
177 childregs->bx = sp; /* function */
178 childregs->bp = arg;
179 childregs->orig_ax = -1;
180 childregs->cs = __KERNEL_CS | get_kernel_rpl();
181 childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
182 return 0;
183 }
1d4b4b29 184 *childregs = *current_pt_regs();
7076aada
AV
185
186 childregs->ax = 0;
1d4b4b29
AV
187 if (sp)
188 childregs->sp = sp;
1da177e4 189
66cb5917 190 err = -ENOMEM;
24f1e32c 191 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
66cb5917 192
d3a4f48d 193 if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
cced4022
TM
194 p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
195 IO_BITMAP_BYTES, GFP_KERNEL);
1da177e4
LT
196 if (!p->thread.io_bitmap_ptr) {
197 p->thread.io_bitmap_max = 0;
198 return -ENOMEM;
199 }
d3a4f48d 200 set_tsk_thread_flag(p, TIF_IO_BITMAP);
6612538c 201 }
1da177e4
LT
202
203 /*
204 * Set a new TLS for the child thread?
205 */
206 if (clone_flags & CLONE_SETTLS) {
207#ifdef CONFIG_IA32_EMULATION
208 if (test_thread_flag(TIF_IA32))
efd1ca52 209 err = do_set_thread_area(p, -1,
65ea5b03 210 (struct user_desc __user *)childregs->si, 0);
7de08b4e
GP
211 else
212#endif
213 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
214 if (err)
1da177e4
LT
215 goto out;
216 }
217 err = 0;
218out:
219 if (err && p->thread.io_bitmap_ptr) {
220 kfree(p->thread.io_bitmap_ptr);
221 p->thread.io_bitmap_max = 0;
222 }
66cb5917 223
1da177e4
LT
224 return err;
225}
226
e634d8fc
PA
/*
 * Common tail of exec: reset segment state and point @regs at the new
 * program's entry (@new_ip) and stack (@new_sp) with the given code,
 * stack and data segment selectors.  FS and GS are cleared so no TLS
 * state leaks from the exec'ing image into the new one.
 */
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp = new_sp;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/* keep the per-CPU syscall-path copy of the user stack in sync */
	this_cpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	/* fresh flags: interrupts on, everything else cleared */
	regs->flags = X86_EFLAGS_IF;
}
e634d8fc
PA
244
/* Start a 64-bit user thread: flat __USER_CS/__USER_DS, DS/ES cleared. */
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
513ad84b 251
a6f05a6a
PA
#ifdef CONFIG_IA32_EMULATION
/*
 * Start a compat (ia32 or x32) user thread.  x32 tasks use the 64-bit
 * code segment (__USER_CS), plain ia32 tasks use __USER32_CS; both get
 * __USER_DS for stack and data.
 */
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
513ad84b 261
1da177e4
LT
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 *
 * NOTE: the statement order below is load-bearing throughout —
 * segment saves must precede load_TLS(), and the lazy-mode flush must
 * sit between load_TLS() and the FS/GS restores.  Returns the previous
 * task so the switch_to() macro can complete the handover.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	/* Same dance for GS, using the kernel GS base MSR. */
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = this_cpu_read(old_rsp);
	this_cpu_write(old_rsp, next->usersp);
	this_cpu_write(current_task, next_p);

	this_cpu_write(kernel_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}
374
1da177e4
LT
/*
 * Put the current task into a plain 64-bit personality: clear all
 * 32-bit/x32 thread flags and unmark the mm as compat.
 */
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit childs are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
394
d1a797f3 395void set_personality_ia32(bool x32)
05d43ed8
PA
396{
397 /* inherit personality from parent */
398
399 /* Make sure to be in 32bit mode */
6bd33008 400 set_thread_flag(TIF_ADDR32);
05d43ed8 401
375906f8
SW
402 /* Mark the associated mm as containing 32-bit tasks. */
403 if (current->mm)
404 current->mm->context.ia32_compat = 1;
405
d1a797f3
PA
406 if (x32) {
407 clear_thread_flag(TIF_IA32);
408 set_thread_flag(TIF_X32);
409 current->personality &= ~READ_IMPLIES_EXEC;
ce5f7a99
BP
410 /* is_compat_task() uses the presence of the x32
411 syscall bit flag to determine compat status */
412 current_thread_info()->status &= ~TS_COMPAT;
d1a797f3
PA
413 } else {
414 set_thread_flag(TIF_IA32);
415 clear_thread_flag(TIF_X32);
416 current->personality |= force_personality32;
417 /* Prepare the first "return" to user space */
418 current_thread_info()->status |= TS_COMPAT;
419 }
05d43ed8 420}
febb72a6 421EXPORT_SYMBOL_GPL(set_personality_ia32);
05d43ed8 422
1da177e4
LT
423unsigned long get_wchan(struct task_struct *p)
424{
425 unsigned long stack;
7de08b4e 426 u64 fp, ip;
1da177e4
LT
427 int count = 0;
428
7de08b4e
GP
429 if (!p || p == current || p->state == TASK_RUNNING)
430 return 0;
57eafdc2 431 stack = (unsigned long)task_stack_page(p);
e1e23bb0 432 if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
1da177e4 433 return 0;
faca6227 434 fp = *(u64 *)(p->thread.sp);
7de08b4e 435 do {
a88cde13 436 if (fp < (unsigned long)stack ||
e1e23bb0 437 fp >= (unsigned long)stack+THREAD_SIZE)
7de08b4e 438 return 0;
65ea5b03
PA
439 ip = *(u64 *)(fp+8);
440 if (!in_sched_functions(ip))
441 return ip;
7de08b4e
GP
442 fp = *(u64 *)fp;
443 } while (count++ < 16);
1da177e4
LT
444 return 0;
445}
446
/*
 * Implement arch_prctl(2) for @task: set or query the FS/GS segment
 * bases.  For ARCH_SET_*, @addr is the new base; small (<4G) bases are
 * installed via a GDT TLS descriptor (cheaper to switch), large ones
 * via the base MSR with a null selector.  For ARCH_GET_*, @addr is a
 * user pointer the current base is written to.  @task may be a child
 * being set up (copy_thread) or current; MSRs/segments are only
 * touched when @task == current ("doit").
 *
 * Returns 0 on success, -EPERM for an out-of-range base, -EINVAL for
 * an unknown @code, or the put_user()/wrmsrl_safe() error.
 */
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			/* a live nonzero selector means the MSR holds the base */
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1da177e4
LT
541
/* arch_prctl(2) syscall entry: operate on the calling task. */
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
546
89240ba0
SS
547unsigned long KSTK_ESP(struct task_struct *task)
548{
549 return (test_tsk_thread_flag(task, TIF_IA32)) ?
550 (task_pt_regs(task)->sp) : ((task)->thread.usersp);
551}
This page took 0.702089 seconds and 5 git commands to generate.