x86: rename the struct pt_regs members for 32/64-bit consistency
arch/x86/kernel/process_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	local_irq_disable();
	if (!need_resched()) {
		ktime_t t0, t1;
		u64 t0n, t1n;

		t0 = ktime_get();
		t0n = ktime_to_ns(t0);
		safe_halt();	/* enables interrupts racelessly */
		local_irq_disable();
		t1 = ktime_get();
		t1n = ktime_to_ns(t1);
		sched_clock_idle_wakeup_event(t1n - t0n);
	}
	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
}

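/*
 * Why the TS_POLLING dance above matters: the scheduler's wakeup side
 * skips the cross-CPU reschedule IPI when it sees TS_POLLING set, on
 * the assumption that the idle CPU is spinning on need_resched().
 * default_idle() really halts, so it must clear TS_POLLING first and
 * order that store against the need_resched() test with smp_mb().
 * Roughly, the waker side looks like this (a sketch, not the exact
 * scheduler code):
 *
 *	set_tsk_need_resched(idle_task);
 *	smp_mb();
 *	if (!(task_thread_info(idle_task)->status & TS_POLLING))
 *		smp_send_reschedule(cpu);
 */
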
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	cpu_relax();
}

static void do_nothing(void *unused)
{
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
					!per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec, if a CPU still did not call idle
		 * it may be because it is in idle and not waking up
		 * because it has nothing to do.
		 * Give all the remaining CPUS a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

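/*
 * Typical use of cpu_idle_wait() (a sketch, not lifted verbatim from
 * any one caller): code that swaps pm_idle, such as the ACPI processor
 * driver, installs the new handler first and then waits until no CPU
 * can still be executing the old one:
 *
 *	pm_idle = my_new_idle;		hypothetical replacement handler
 *	cpu_idle_wait();
 */
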
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			tick_nohz_stop_sched_tick();

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			idle();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

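/*
 * Hint encoding, summarized from Intel's MONITOR/MWAIT documentation
 * (kept here for reference, not derived from this file): in the EAX
 * hint, bits 7:4 select the target C-state (0 = C1, 1 = C2, ...) and
 * bits 3:0 a sub-state; ECX bit 0 requests that interrupts break the
 * wait even while they are masked. So mwait_idle_with_hints(0x10, 1)
 * would ask for a C2-like state with break-on-interrupt, which is how
 * ACPI-driven idle code requests deeper C-states.
 */
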
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			if (!printed) {
				printk(KERN_INFO "using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

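/*
 * Command-line usage of the option parsed above:
 *
 *	idle=poll	busy-wait on need_resched() instead of halting
 *	idle=mwait	force MWAIT-based idle (sets force_mwait)
 *
 * Either setting also sets boot_option_idle_override, which other
 * idle-selection code (e.g. the ACPI processor driver) checks before
 * overriding pm_idle with its own routine.
 */
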
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
		regs->flags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
		regs->ax, regs->bx, regs->cx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
		regs->dx, regs->si, regs->di);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
		regs->bp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
		regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
		regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
		fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1));
}

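/*
 * Note: (regs + 1) above points just past the saved exception frame,
 * i.e. at the kernel stack as it was when the trap was taken, which is
 * what show_trace() expects as its starting stack pointer.
 */
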
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

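/*
 * Note on the sp == ~0UL case in copy_thread(): kernel_thread() passes
 * ~0UL, so the child's pt_regs frame itself becomes the initial stack
 * top; user fork/clone passes a real user sp. childregs->ax = 0 is what
 * makes fork() and clone() return 0 in the child.
 */
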
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (next->debugctlmsr != prev->debugctlmsr)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
	(unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

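/*
 * __switch_to() returns prev_p so that, after the stack switch, the
 * scheduler still has a handle on the task we switched away from and
 * can run its post-switch cleanup (finish_task_switch()) on it.
 */
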
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		    NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

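/*
 * The walk in get_wchan() assumes frame pointers: each frame stores the
 * caller's rbp at *fp and the return address at fp+8, so following *fp
 * climbs the call chain until an address outside the scheduler
 * (!in_sched_functions()) is found - the "wait channel" reported via
 * /proc/<pid>/wchan.
 */
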
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			asm("movl %%gs,%0" : "=r" (gsindex));
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		}
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

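/*
 * Userspace view of sys_arch_prctl() (a sketch, error handling omitted;
 * tls_block is a hypothetical allocation). Threading libraries set the
 * 64-bit TLS base this way:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *
 * After this, %fs-relative accesses resolve against tls_block, and
 * ARCH_GET_FS reads the base back through the same switch above.
 */
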
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

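/*
 * For reference: arch_align_stack() jitters the initial user stack
 * downward by up to 8KB and then 16-byte aligns it (the x86-64 ABI
 * requires 16-byte stack alignment at function entry), while
 * arch_randomize_brk() places the heap start at a random offset within
 * 32MB (0x02000000) above the current brk.
 */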