Merge branch 'x86/unify-cpu-detect' into x86-v28-for-linus-phase4-D
arch/x86/kernel/process_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

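/*
 * enter_idle()/exit_idle() bracket the time a CPU spends in the idle
 * loop: the per-CPU "isidle" PDA flag records the state, and the
 * idle_notifier chain is told about IDLE_START/IDLE_END transitions.
 */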
void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <linux/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	c1e_remove_cpu(raw_smp_processor_id());

	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	/* mask all interrupts, flush any and all caches, and halt */
	wbinvd_halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle.  But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
		regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
		regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
		regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
		regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
		regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
		regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
		fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
		es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
		cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}

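/*
 * flush_thread() resets the per-thread state at exec time: it resolves the
 * pending ABI selection (TIF_ABI_PENDING/TIF_IA32), clears the hardware
 * debug registers and TLS entries, and drops any lazy FPU state.
 */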
void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

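/*
 * copy_thread() sets up the child's kernel stack: it places a copy of the
 * parent's pt_regs at the top of the new stack (with rax forced to 0 so the
 * child sees a zero return value from fork), copies the parent's segment
 * state, duplicates the parent's I/O permission bitmap if it has one, and
 * handles CLONE_SETTLS requests at the end.
 */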
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* EFLAGS.IF: start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

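/*
 * TSC access control for the PR_GET_TSC/PR_SET_TSC prctls: TIF_NOTSC
 * records the per-thread policy, and CR4.TSD is toggled so that a user
 * space rdtsc faults (reported as PR_TSC_SIGSEGV) while the thread has
 * TSC reads disabled.
 */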
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

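/*
 * __switch_to_xtra() handles the slow-path context-switch work that only
 * some tasks need: reprogramming the DS save area and DEBUGCTL MSR,
 * reloading the hardware breakpoint registers, flipping CR4.TSD for
 * TIF_NOTSC, and copying or clearing the I/O permission bitmap in the TSS.
 */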
static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * A segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When the prev process used a
	 * 64bit base, always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear the 64bit base, since an overloaded base is
		 * always mapped to the Null selector.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when the next process has a 64bit base, use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now.
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math().
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/*
	 * TBD: this overwrites the user's setup; there should be two bits.
	 * But 64bit processes have always behaved this way, so it's not too
	 * bad.  The main problem is just that 32bit children are affected
	 * again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

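/*
 * get_wchan() reports where a sleeping task is blocked: it walks the
 * task's saved frame-pointer chain on its kernel stack and returns the
 * first return address that is not inside the scheduler itself.
 */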
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

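/*
 * do_arch_prctl() implements ARCH_SET_FS/ARCH_SET_GS and the matching
 * ARCH_GET_* calls.  Bases that fit in 32 bits are installed through a
 * GDT TLS slot (cheaper to switch); larger bases are written directly
 * to MSR_FS_BASE or MSR_KERNEL_GS_BASE.
 */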
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

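/*
 * arch_align_stack() applies stack randomization at exec time: unless the
 * task has requested ADDR_NO_RANDOMIZE, the stack top is moved down by a
 * random amount of up to 8 kB and then aligned to 16 bytes.
 */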
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

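/*
 * arch_randomize_brk() picks a randomized heap start somewhere in the
 * 32 MB window above the minimum brk, falling back to mm->brk if no
 * random value could be obtained.
 */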
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}