/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>

asmlinkage extern void ret_from_fork(void);

__visible DEFINE_PER_CPU(unsigned long, old_rsp);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
               regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))
                return;

        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
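
/*
 * Called when the last thread of a process is torn down.  The only work
 * left here is a sanity check: a dying task should no longer own a
 * modified LDT, so complain loudly (and BUG) if one is still attached.
 */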
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }
}
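
/*
 * Helpers for the "small base" case of ARCH_SET_FS/ARCH_SET_GS: install a
 * 32-bit TLS descriptor in the task's GDT slots so the base can be loaded
 * via a plain segment selector, and read such a base back out.
 */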
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}
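
/*
 * Set up the kernel stack and thread state of a newly forked task.  Kernel
 * threads get a zeroed pt_regs frame that makes ret_from_fork call the
 * requested function; user threads inherit a copy of the parent's registers
 * (with rax forced to 0) plus the parent's segment and I/O-bitmap state.
 */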
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
        p->thread.usersp = me->thread.usersp;
        set_tsk_thread_flag(p, TIF_FORK);
        p->thread.fpu_counter = 0;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->sp = (unsigned long)childregs;
                childregs->ss = __KERNEL_DS;
                childregs->bx = sp; /* function */
                childregs->bp = arg;
                childregs->orig_ax = -1;
                childregs->cs = __KERNEL_CS | get_kernel_rpl();
                childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
                return 0;
        }
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
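
/*
 * Reset the register and segment state for a freshly exec'ed image: clear
 * the stale FS/GS selectors, point DS/ES at the new data segment, and aim
 * the saved pt_regs at the new entry point and stack.  start_thread() and
 * start_thread_ia32() below pick the 64-bit or compat code segments.
 */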
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        current->thread.usersp = new_sp;
        regs->ip = new_ip;
        regs->sp = new_sp;
        this_cpu_write(old_rsp, new_sp);
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
        fpu_switch_t fpu;

        fpu = switch_fpu_prepare(prev_p, next_p, cpu);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        load_sp0(tss, next);

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);

        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch FS and GS.
         *
         * Segment register != 0 always requires a reload. Also
         * reload when it has changed. When prev process used 64bit
         * base always reload to avoid an information leak.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
                /*
                 * Check if the user used a selector != 0; if yes
                 * clear 64bit base, since overloaded base is always
                 * mapped to the Null selector
                 */
                if (fsindex)
                        prev->fs = 0;
        }
        /* when next process has a 64bit base use it */
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;

        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
                if (gsindex)
                        prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;

        switch_fpu_finish(next_p, fpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        prev->usersp = this_cpu_read(old_rsp);
        this_cpu_write(old_rsp, next->usersp);
        this_cpu_write(current_task, next_p);

        /*
         * If it were not for PREEMPT_ACTIVE we could guarantee that the
         * preempt_count of all tasks was equal here and this would not be
         * needed.
         */
        task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
        this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

        this_cpu_write(kernel_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE - KERNEL_STACK_OFFSET);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        return prev_p;
}
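
/*
 * Personality setup at exec time.  set_personality_64bit() puts the thread
 * into plain 64-bit mode; set_personality_ia32() below selects either the
 * x32 or the legacy ia32 compat flavour.
 */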
void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /* is_compat_task() uses the presence of the x32
                   syscall bit flag to determine compat status */
                current_thread_info()->status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current_thread_info()->status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
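
/*
 * Walk a sleeping task's saved frame pointers and return the first return
 * address that is not part of the scheduler, i.e. the place the task is
 * waiting in.  Returns 0 for running tasks or if the stack looks bogus.
 */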
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp >= (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
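
/*
 * arch_prctl() back end.  ARCH_SET_FS/ARCH_SET_GS install a new segment
 * base: small (<= 4GB) bases go through a GDT TLS slot so they can be
 * switched cheaply, larger ones are written to the FS/GS base MSRs.
 * ARCH_GET_FS/ARCH_GET_GS copy the current base out to user space.
 */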
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
                                ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
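
/*
 * Report the task's user stack pointer: compat (IA32) tasks keep it in the
 * saved pt_regs, 64-bit tasks in the per-thread usersp shadow.
 */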
unsigned long KSTK_ESP(struct task_struct *task)
{
        return (test_tsk_thread_flag(task, TIF_IA32)) ?
                        (task_pt_regs(task)->sp) : ((task)->thread.usersp);
}