dump_stack: unify debug information printed by show_regs()
arch/x86/kernel/process_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

asmlinkage extern void ret_from_fork(void);

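/*
 * Per-CPU slot for the user stack pointer.  The 64-bit syscall entry path
 * saves the user RSP here, and __switch_to() below keeps it in sync with
 * thread.usersp when tasks are switched (see the uses further down).
 */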
DEFINE_PER_CPU(unsigned long, old_rsp);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}

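/*
 * Small (32-bit) FS/GS bases are installed as GDT TLS entries rather than
 * written to the FS/GS base MSRs; see the "small bases via the GDT"
 * handling in do_arch_prctl() below.  These two helpers encode and decode
 * such a TLS descriptor.
 */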
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

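/*
 * Set up thread state for a newly forked task: kernel stack, child pt_regs
 * and the saved segment/FS/GS state.  Kernel threads (PF_KTHREAD) get a
 * synthetic frame so that ret_from_fork calls the function passed in @sp
 * with the argument passed in @arg.
 */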
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	p->thread.sp = (unsigned long) childregs;
	p->thread.usersp = me->thread.usersp;
	set_tsk_thread_flag(p, TIF_FORK);
	p->fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)childregs;
		childregs->ss = __KERNEL_DS;
		childregs->bx = sp; /* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
		return 0;
	}
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

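/*
 * Common register setup when starting a new user-space image (exec):
 * reset the data/FS/GS selectors, then point the saved pt_regs at the
 * new instruction and stack pointers with the requested CS/SS/DS.
 */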
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp = new_sp;
	regs->ip = new_ip;
	regs->sp = new_sp;
	this_cpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = this_cpu_read(old_rsp);
	this_cpu_write(old_rsp, next->usersp);
	this_cpu_write(current_task, next_p);

	this_cpu_write(kernel_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		current->personality &= ~READ_IMPLIES_EXEC;
		/* is_compat_task() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

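/*
 * Report where a sleeping task is blocked: walk the saved frame-pointer
 * chain on the task's kernel stack and return the first return address
 * that is not inside the scheduler itself.  The walk is bounded and only
 * follows frames that stay within the task's stack page.
 */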
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

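/*
 * Get/set the FS or GS base for a task.  Bases that fit in 32 bits are
 * placed in a GDT TLS slot (cheaper to reload on context switch); larger
 * bases go straight into the FS/GS base MSRs.
 */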
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = wrmsrl_safe(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

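/*
 * Usage sketch (user space, not part of this file): the ARCH_* codes above
 * are reached through the arch_prctl(2) system call below, which glibc does
 * not wrap, so callers typically go through syscall(2).  Illustrative only:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned long fsbase;
 *
 *		if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
 *			printf("FS base: %#lx\n", fsbase);
 *		return 0;
 *	}
 */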
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
		(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}