arch/x86/kernel/process_64.c

/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}

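/*
 * Illustrative sketch only (not part of the original file): a driver that
 * wants to quiesce around CPU idle can hook into the chain above roughly
 * like this; my_idle_cb, my_idle_nb, my_device_quiesce and my_device_resume
 * are made-up names:
 *
 *	static int my_idle_cb(struct notifier_block *nb, unsigned long action,
 *			      void *data)
 *	{
 *		if (action == IDLE_START)
 *			my_device_quiesce();
 *		else if (action == IDLE_END)
 *			my_device_resume();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 *
 * The chain is atomic and the callbacks run with interrupts disabled (or
 * from interrupt context on idle exit), so they must not sleep.
 */
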
void enter_idle(void)
{
        write_pda(isidle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (test_and_clear_bit_pda(0, isidle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we
         * test NEED_RESCHED:
         */
        smp_mb();
        if (!need_resched())
                safe_halt();    /* enables interrupts racelessly */
        else
                local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
}

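/*
 * Why the TS_POLLING dance above matters (roughly): a remote CPU doing a
 * wakeup checks TS_POLLING to decide whether setting TIF_NEED_RESCHED is
 * enough or whether it must also send a reschedule IPI.  Clearing
 * TS_POLLING and only then re-testing need_resched(), separated by the
 * smp_mb(), ensures we never halt with a wakeup already pending.
 */
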
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
        idle_task_exit();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        current_thread_info()->status |= TS_POLLING;

        /*
         * If we're the non-boot CPU, nothing set the PDA stack
         * canary up for us - and if we are the boot CPU we have
         * a 0 stack canary. This is a good place for updating
         * it, as we won't ever return from this function (so the
         * invalid canaries already on the stack won't ever
         * trigger):
         */
        boot_init_stack_canary();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);

                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_irq_disable();
                        enter_idle();
                        idle();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
                        __exit_idle();
                }

                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
        printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
                regs->flags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
                regs->ax, regs->bx, regs->cx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
                regs->dx, regs->si, regs->di);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
                regs->bp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
                regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
                regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
                fs, fsindex, gs, gsindex, shadowgs);
        printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
        printk("CPU %d:", smp_processor_id());
        __show_regs(regs);
        show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }
        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

        childregs->ax = 0;
        childregs->sp = sp;
        if (sp == ~0UL)
                childregs->sp = (unsigned long)childregs;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);
        p->thread.usersp = me->thread.usersp;

        set_tsk_thread_flag(p, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                        IO_BITMAP_BYTES);
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

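/*
 * Note on CLONE_SETTLS above: for a native 64-bit caller the new TLS base
 * is the fifth syscall argument and therefore arrives in %r8 (hence
 * childregs->r8); it is installed as the FS base via do_arch_prctl().  For
 * an ia32 task the argument is instead a pointer to a struct user_desc,
 * which arrives in %esi (childregs->si) and is installed into the GDT TLS
 * slots via do_set_thread_area().
 */
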
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
        load_gs_index(0);
        regs->ip = new_ip;
        regs->sp = new_sp;
        write_pda(oldrsp, new_sp);
        regs->cs = __USER_CS;
        regs->ss = __USER_DS;
        regs->flags = 0x200;    /* X86_EFLAGS_IF: start with interrupts enabled */
        set_fs(USER_DS);
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

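/*
 * start_thread() is what the binary-format loaders call at the end of
 * execve(): load_elf_binary(), for example, invokes it with the ELF entry
 * point as new_ip and the freshly set up user stack as new_sp, which is
 * why all segment and flag state is reset to a clean user-mode slate here.
 */
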
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

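/*
 * These two are the arch backend of prctl(PR_GET_TSC)/prctl(PR_SET_TSC).
 * A rough user-space sketch (illustrative only, not part of this file):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	(rdtsc now raises SIGSEGV)
 *	...
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);	(rdtsc allowed again)
 */
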
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
                                    struct task_struct *next_p,
                                    struct tss_struct *tss)
{
        struct thread_struct *prev, *next;
        unsigned long debugctl;

        prev = &prev_p->thread;
        next = &next_p->thread;

        debugctl = prev->debugctlmsr;
        if (next->ds_area_msr != prev->ds_area_msr) {
                /* we clear debugctl to make sure DS
                 * is not in use when we change it */
                debugctl = 0;
                update_debugctlmsr(0);
                wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
        }

        if (next->debugctlmsr != debugctl)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                        max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }

#ifdef X86_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(next->xstate);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        load_sp0(tss, next);

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /*
                 * A non-zero segment selector always requires a reload;
                 * also reload when the selector has changed.  When the
                 * previous task used a 64-bit base, always reload to
                 * avoid leaking that base.
                 */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /*
                         * Check whether the user used a non-zero selector;
                         * if so, clear the 64-bit base, since an overloaded
                         * base is always mapped to the null selector.
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when the next process has a 64-bit base, use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /* Must be after DS reload */
        unlazy_fpu(prev_p);

        /*
         * Switch the PDA and FPU contexts.
         */
        prev->usersp = read_pda(oldrsp);
        write_pda(oldrsp, next->usersp);
        write_pda(pcurrent, next_p);

        write_pda(kernelstack,
                (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
        /*
         * Build time only check to make sure the stack_canary is at
         * offset 40 in the pda; this is a gcc ABI requirement
         */
        BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
         */
        if (next_p->fpu_counter > 5)
                math_state_restore();
        return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
        return error;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: this overwrites the user's setup. We should really have
           two bits here. But 64-bit processes have always behaved this
           way, so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

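/*
 * get_wchan() is what feeds the "wchan" field in /proc/<pid>/stat and
 * /proc/<pid>/wchan: it walks the sleeping task's saved frame-pointer
 * chain (so it only works well when the kernel is built with frame
 * pointers) and returns the first return address outside the scheduler,
 * i.e. roughly "where the task is waiting".
 */
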
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        asm("movl %%gs,%0" : "=r" (gsindex));
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                }
                else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

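/*
 * User-space entry point for the cases above.  A rough sketch of how a
 * threading library would set and read its TLS base (tls_block and
 * current_base are made-up names; glibc normally hides this behind its own
 * plumbing):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&current_base);
 *
 * ARCH_SET_GS/ARCH_GET_GS work the same way, but operate on the
 * MSR_KERNEL_GS_BASE value that becomes the GS base in user mode.
 */
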
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
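
/*
 * Worked numbers for the two helpers above: arch_align_stack() shifts the
 * initial stack pointer down by up to 8191 bytes (get_random_int() % 8192)
 * and then rounds down to a 16-byte boundary, so e.g. sp = 0x7fff0000 can
 * land anywhere from 0x7ffee000 up to 0x7fff0000 in 16-byte steps.
 * arch_randomize_brk() picks a page-aligned brk somewhere in the 32 MiB
 * (0x02000000 byte) window starting at the unrandomized brk, falling back
 * to the original brk if randomize_range() returns 0.
 */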