/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

#include <trace/events/power.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
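
/*
 * Usage sketch (not part of this file): a subsystem that wants to react
 * to this CPU entering or leaving idle registers an atomic notifier; the
 * callback is invoked with IDLE_START or IDLE_END as the action:
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;	// quiesce
 *		else if (action == IDLE_END)
 *			;	// resume
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 *
 * my_idle_cb/my_idle_nb are hypothetical names for illustration only.
 */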

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();

			trace_power_end(smp_processor_id());

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
			regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
			regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
			regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
			regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
			regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
			fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
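
/*
 * A sketch (not from the original source) of the child's kernel stack as
 * copy_thread() leaves it, per the assignments above:
 *
 *	task_stack_page(p) + THREAD_SIZE -> +------------------+ <- thread.sp0
 *	                                    |  struct pt_regs  |    (childregs + 1)
 *	                                    +------------------+ <- thread.sp
 *	                                    |    free stack    |    (childregs)
 *	task_stack_page(p) ---------------> +------------------+
 */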

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
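
/*
 * Note (added for context): start_thread() is the hook the binfmt loaders
 * use to set up the initial user register state on exec; e.g.
 * load_elf_binary() calls it with new_ip at the ELF entry point and
 * new_sp at the freshly built argument stack.
 */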

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	__unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
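
/*
 * Walk the frame-pointer chain of a sleeping task to find the first
 * return address outside the scheduler. This assumes frame pointers are
 * in use (CONFIG_FRAME_POINTER): the saved %rbp sits at the base of each
 * frame and the return address 8 bytes above it. The walk is bounded to
 * 16 frames and to addresses inside the task's stack.
 */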
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
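
/*
 * Userspace reaches do_arch_prctl() through the arch_prctl(2) syscall.
 * A minimal sketch (constants from <asm/prctl.h>; glibc normally manages
 * FS itself for TLS, so real programs rarely call this directly):
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, some_base);
 *
 * some_base is a placeholder address used only for illustration.
 */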

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}