Merge tag 'for-3.8' of git://openrisc.net/~jonas/linux
[deliverable/linux.git] / arch / x86 / kernel / process_64.c
CommitLineData
1da177e4 1/*
1da177e4
LT
2 * Copyright (C) 1995 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * Gareth Hughes <gareth@valinux.com>, May 2000
6612538c 6 *
1da177e4
LT
7 * X86-64 port
8 * Andi Kleen.
76e4f660
AR
9 *
10 * CPU hotplug support - ashok.raj@intel.com
1da177e4
LT
11 */
12
13/*
14 * This file handles the architecture-dependent parts of process handling..
15 */
16
76e4f660 17#include <linux/cpu.h>
1da177e4
LT
18#include <linux/errno.h>
19#include <linux/sched.h>
6612538c 20#include <linux/fs.h>
1da177e4
LT
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/elfcore.h>
24#include <linux/smp.h>
25#include <linux/slab.h>
26#include <linux/user.h>
1da177e4
LT
27#include <linux/interrupt.h>
28#include <linux/delay.h>
6612538c 29#include <linux/module.h>
1da177e4 30#include <linux/ptrace.h>
95833c83 31#include <linux/notifier.h>
c6fd91f0 32#include <linux/kprobes.h>
1eeb66a1 33#include <linux/kdebug.h>
529e25f6 34#include <linux/prctl.h>
7de08b4e
GP
35#include <linux/uaccess.h>
36#include <linux/io.h>
8b96f011 37#include <linux/ftrace.h>
1da177e4 38
1da177e4 39#include <asm/pgtable.h>
1da177e4
LT
40#include <asm/processor.h>
41#include <asm/i387.h>
1361b83a 42#include <asm/fpu-internal.h>
1da177e4 43#include <asm/mmu_context.h>
1da177e4 44#include <asm/prctl.h>
1da177e4
LT
45#include <asm/desc.h>
46#include <asm/proto.h>
47#include <asm/ia32.h>
95833c83 48#include <asm/idle.h>
bbc1f698 49#include <asm/syscalls.h>
66cb5917 50#include <asm/debugreg.h>
f05e798a 51#include <asm/switch_to.h>
1da177e4
LT
52
53asmlinkage extern void ret_from_fork(void);
54
3d1e42a7 55DEFINE_PER_CPU(unsigned long, old_rsp);
1da177e4 56
/*
 * Print the full register state of the current CPU, including state that
 * is not saved in pt_regs (segment registers, FS/GS bases, control and
 * debug registers).
 *
 * @regs: the trap-time register frame to dump
 * @all:  when non-zero, additionally dump control and debug registers
 *        (these are read live from the CPU, not from @regs)
 */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	/* Segment selectors are read live from the CPU, not from @regs. */
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	/* The 64-bit FS/GS bases and the inactive (kernel) GS base MSR. */
	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	/* Debug registers DR4/DR5 are aliases and intentionally skipped. */
	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
115
1da177e4
LT
116void release_thread(struct task_struct *dead_task)
117{
118 if (dead_task->mm) {
119 if (dead_task->mm->context.size) {
c767a54b
JP
120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
121 dead_task->comm,
122 dead_task->mm->context.ldt,
123 dead_task->mm->context.size);
1da177e4
LT
124 BUG();
125 }
126 }
127}
128
129static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
130{
6612538c 131 struct user_desc ud = {
1da177e4
LT
132 .base_addr = addr,
133 .limit = 0xfffff,
134 .seg_32bit = 1,
135 .limit_in_pages = 1,
136 .useable = 1,
137 };
ade1af77 138 struct desc_struct *desc = t->thread.tls_array;
1da177e4 139 desc += tls;
80fbb69a 140 fill_ldt(desc, &ud);
1da177e4
LT
141}
142
143static inline u32 read_32bit_tls(struct task_struct *t, int tls)
144{
91394eb0 145 return get_desc_base(&t->thread.tls_array[tls]);
1da177e4
LT
146}
147
6f2c55b8 148int copy_thread(unsigned long clone_flags, unsigned long sp,
afa86fc4 149 unsigned long arg, struct task_struct *p)
1da177e4
LT
150{
151 int err;
7de08b4e 152 struct pt_regs *childregs;
1da177e4
LT
153 struct task_struct *me = current;
154
7076aada
AV
155 p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
156 childregs = task_pt_regs(p);
faca6227 157 p->thread.sp = (unsigned long) childregs;
faca6227 158 p->thread.usersp = me->thread.usersp;
e4f17c43 159 set_tsk_thread_flag(p, TIF_FORK);
cea20ca3 160 p->fpu_counter = 0;
66cb5917 161 p->thread.io_bitmap_ptr = NULL;
1da177e4 162
ada85708 163 savesegment(gs, p->thread.gsindex);
7ce5a2b9 164 p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
ada85708 165 savesegment(fs, p->thread.fsindex);
7ce5a2b9 166 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
ada85708
JF
167 savesegment(es, p->thread.es);
168 savesegment(ds, p->thread.ds);
7076aada
AV
169 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
170
1d4b4b29 171 if (unlikely(p->flags & PF_KTHREAD)) {
7076aada
AV
172 /* kernel thread */
173 memset(childregs, 0, sizeof(struct pt_regs));
174 childregs->sp = (unsigned long)childregs;
175 childregs->ss = __KERNEL_DS;
176 childregs->bx = sp; /* function */
177 childregs->bp = arg;
178 childregs->orig_ax = -1;
179 childregs->cs = __KERNEL_CS | get_kernel_rpl();
180 childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
181 return 0;
182 }
1d4b4b29 183 *childregs = *current_pt_regs();
7076aada
AV
184
185 childregs->ax = 0;
1d4b4b29
AV
186 if (sp)
187 childregs->sp = sp;
1da177e4 188
66cb5917 189 err = -ENOMEM;
24f1e32c 190 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
66cb5917 191
d3a4f48d 192 if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
cced4022
TM
193 p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
194 IO_BITMAP_BYTES, GFP_KERNEL);
1da177e4
LT
195 if (!p->thread.io_bitmap_ptr) {
196 p->thread.io_bitmap_max = 0;
197 return -ENOMEM;
198 }
d3a4f48d 199 set_tsk_thread_flag(p, TIF_IO_BITMAP);
6612538c 200 }
1da177e4
LT
201
202 /*
203 * Set a new TLS for the child thread?
204 */
205 if (clone_flags & CLONE_SETTLS) {
206#ifdef CONFIG_IA32_EMULATION
207 if (test_thread_flag(TIF_IA32))
efd1ca52 208 err = do_set_thread_area(p, -1,
65ea5b03 209 (struct user_desc __user *)childregs->si, 0);
7de08b4e
GP
210 else
211#endif
212 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
213 if (err)
1da177e4
LT
214 goto out;
215 }
216 err = 0;
217out:
218 if (err && p->thread.io_bitmap_ptr) {
219 kfree(p->thread.io_bitmap_ptr);
220 p->thread.io_bitmap_max = 0;
221 }
66cb5917 222
1da177e4
LT
223 return err;
224}
225
e634d8fc
PA
/*
 * Common tail of start_thread()/start_thread_ia32(): reset the segment
 * registers and rewrite the register frame so the task returns to user
 * mode at @new_ip with stack @new_sp using the given selectors.
 *
 * Note the ordering: fs/gs are cleared (dropping any old 64-bit bases)
 * before the new data selectors are installed.
 */
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp = new_sp;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/* Keep the per-cpu cached user stack pointer in sync. */
	this_cpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	/* Start with a clean EFLAGS: interrupts enabled, nothing else. */
	regs->flags = X86_EFLAGS_IF;
}
e634d8fc
PA
243
244void
245start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
246{
247 start_thread_common(regs, new_ip, new_sp,
248 __USER_CS, __USER_DS, 0);
249}
513ad84b 250
a6f05a6a
PA
#ifdef CONFIG_IA32_EMULATION
/*
 * Begin executing a compat task.  x32 tasks run with the 64-bit code
 * segment; plain ia32 tasks get the 32-bit one.  Both use __USER_DS for
 * stack and data.
 */
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	unsigned int cs;

	if (test_thread_flag(TIF_X32))
		cs = __USER_CS;
	else
		cs = __USER32_CS;

	start_thread_common(regs, new_ip, new_sp, cs, __USER_DS, __USER_DS);
}
#endif
513ad84b 260
1da177e4
LT
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 *
 * Returns the previous task so the low-level switch code can finish
 * bookkeeping for it.  The statement ordering below is load-bearing:
 * fs/gs must be saved before load_TLS(), and the lazy-mode flush must
 * happen before the TLS-backed segment reloads.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 * (The `|` test skips the reload only when both values are zero.)
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	/* Same dance for GS, via the swapgs-aware load_gs_index(). */
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = this_cpu_read(old_rsp);
	this_cpu_write(old_rsp, next->usersp);
	this_cpu_write(current_task, next_p);

	this_cpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}
373
1da177e4
LT
374void set_personality_64bit(void)
375{
376 /* inherit personality from parent */
377
378 /* Make sure to be in 64bit mode */
6612538c 379 clear_thread_flag(TIF_IA32);
6bd33008 380 clear_thread_flag(TIF_ADDR32);
bb212724 381 clear_thread_flag(TIF_X32);
1da177e4 382
375906f8
SW
383 /* Ensure the corresponding mm is not marked. */
384 if (current->mm)
385 current->mm->context.ia32_compat = 0;
386
1da177e4
LT
387 /* TBD: overwrites user setup. Should have two bits.
388 But 64bit processes have always behaved this way,
389 so it's not too bad. The main problem is just that
6612538c 390 32bit childs are affected again. */
1da177e4
LT
391 current->personality &= ~READ_IMPLIES_EXEC;
392}
393
d1a797f3 394void set_personality_ia32(bool x32)
05d43ed8
PA
395{
396 /* inherit personality from parent */
397
398 /* Make sure to be in 32bit mode */
6bd33008 399 set_thread_flag(TIF_ADDR32);
05d43ed8 400
375906f8
SW
401 /* Mark the associated mm as containing 32-bit tasks. */
402 if (current->mm)
403 current->mm->context.ia32_compat = 1;
404
d1a797f3
PA
405 if (x32) {
406 clear_thread_flag(TIF_IA32);
407 set_thread_flag(TIF_X32);
408 current->personality &= ~READ_IMPLIES_EXEC;
ce5f7a99
BP
409 /* is_compat_task() uses the presence of the x32
410 syscall bit flag to determine compat status */
411 current_thread_info()->status &= ~TS_COMPAT;
d1a797f3
PA
412 } else {
413 set_thread_flag(TIF_IA32);
414 clear_thread_flag(TIF_X32);
415 current->personality |= force_personality32;
416 /* Prepare the first "return" to user space */
417 current_thread_info()->status |= TS_COMPAT;
418 }
05d43ed8 419}
febb72a6 420EXPORT_SYMBOL_GPL(set_personality_ia32);
05d43ed8 421
1da177e4
LT
422unsigned long get_wchan(struct task_struct *p)
423{
424 unsigned long stack;
7de08b4e 425 u64 fp, ip;
1da177e4
LT
426 int count = 0;
427
7de08b4e
GP
428 if (!p || p == current || p->state == TASK_RUNNING)
429 return 0;
57eafdc2 430 stack = (unsigned long)task_stack_page(p);
e1e23bb0 431 if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
1da177e4 432 return 0;
faca6227 433 fp = *(u64 *)(p->thread.sp);
7de08b4e 434 do {
a88cde13 435 if (fp < (unsigned long)stack ||
e1e23bb0 436 fp >= (unsigned long)stack+THREAD_SIZE)
7de08b4e 437 return 0;
65ea5b03
PA
438 ip = *(u64 *)(fp+8);
439 if (!in_sched_functions(ip))
440 return ip;
7de08b4e
GP
441 fp = *(u64 *)fp;
442 } while (count++ < 16);
1da177e4
LT
443 return 0;
444}
445
/*
 * Implement arch_prctl(2): get/set the FS and GS segment bases of @task.
 *
 * @task: the task to operate on (may differ from current, e.g. via
 *        copy_thread() or ptrace)
 * @code: ARCH_SET_GS / ARCH_SET_FS / ARCH_GET_FS / ARCH_GET_GS
 * @addr: new base for SET; user pointer to receive the base for GET
 *
 * Bases <= 4 GiB are installed via a GDT TLS slot (cheaper to switch);
 * larger bases go through the FS/GS base MSRs.  Returns 0 or a negative
 * errno.  Preemption is disabled (get_cpu/put_cpu) across the SET paths
 * so the loaded CPU state cannot be switched out from under us.
 */
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	/* Only touch live CPU state when operating on ourselves. */
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			/* A non-zero live selector overrides the cached
			   64-bit base. */
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1da177e4
LT
540
/* arch_prctl(2) syscall entry point: operate on the calling task. */
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
545
89240ba0
SS
546unsigned long KSTK_ESP(struct task_struct *task)
547{
548 return (test_tsk_thread_flag(task, TIF_IA32)) ?
549 (task_pt_regs(task)->sp) : ((task)->thread.usersp);
550}
This page took 0.711876 seconds and 5 git commands to generate.