/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/syscalls.h>
#include <asm/firmware.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
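/*
 * On uniprocessor kernels these pointers implement the lazy register-
 * state switching described below: each one records which task's
 * FP/AltiVec/VSX/SPE state is currently live in the CPU registers, so
 * a context switch can leave that state in place and only save it when
 * another task faults the facility in.  On SMP the state is saved
 * eagerly on every switch (see __switch_to()) and these stay NULL.
 */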
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
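/*
 * Callers such as the ptrace and signal-frame code use
 * flush_fp_to_thread() so that tsk->thread.fpr holds current values
 * rather than stale copies before they read or modify it.
 */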
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
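/*
 * Sketch of the intended calling pattern: code that wants to use the
 * FPU in-kernel must hold off preemption itself, which is what the
 * WARN_ON(preemptible()) above checks:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use floating-point registers ...
 *	preempt_enable();
 */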
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
	preempt_enable();
}
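/*
 * exit_thread(), flush_thread() and start_thread() below all call
 * discard_lazy_cpu_state() so that a dying or freshly exec'd task is
 * never left recorded as the lazy owner of the registers.
 */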
void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DAC and struct entries.  One shot trigger */
#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
							| DBCR0_IDM));
#endif

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif

#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DAC1, dabr);
#endif

	return 0;
}
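/*
 * Rough shape of the hardware-watchpoint flow: set_dabr() above arms
 * the data address breakpoint register (DABR, or DAC1 on BookE), the
 * CPU faults when a load/store touches the watched address, and
 * do_dabr() turns that fault into a SIGTRAP with si_code TRAP_HWBKPT.
 */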
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);

#if defined(CONFIG_BOOKE)
	/* If new thread DAC (HW breakpoint) is the same then leave it */
	if (new->thread.dabr)
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm,
	       task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	discard_lazy_cpu_state();
}
void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);

#if defined(CONFIG_BOOKE)
		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
	}
}
void
release_thread(struct task_struct *t)
{
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;  /* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
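/*
 * These two are reached from userspace via prctl(2), e.g.:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_FPEXC, &mode);
 *
 * (PR_GET_FPEXC passes an address in arg2; the generic prctl code
 * routes both calls to set_fpexc_mode()/get_fpexc_mode() above.)
 */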
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
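/*
 * Like the fpexc hooks above, the endian and alignment-control hooks
 * back the corresponding prctl(2) pairs, e.g.:
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 */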
#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}
, unsigned long a1
, unsigned long a2
,
917 unsigned long a3
, unsigned long a4
, unsigned long a5
,
918 struct pt_regs
*regs
)
923 filename
= getname((char __user
*) a0
);
924 error
= PTR_ERR(filename
);
925 if (IS_ERR(filename
))
927 flush_fp_to_thread(current
);
928 flush_altivec_to_thread(current
);
929 flush_spe_to_thread(current
);
930 error
= do_execve(filename
, (char __user
* __user
*) a1
,
931 (char __user
* __user
*) a2
, regs
);
#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}
#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
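/*
 * get_wchan() and show_stack() below both use validate_sp() to check
 * that each frame pointer they chase still points into the task's
 * stack (or one of the IRQ stacks) before dereferencing it.
 */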
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
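/*
 * get_wchan() is what /proc/<pid>/wchan reports: the first return
 * address in the blocked task's backtrace that is not itself inside
 * the scheduler.
 */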
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long addr = (unsigned long)return_to_handler;

	addr = *(unsigned long*)addr;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if (ip == addr && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif
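/*
 * When THREAD_SIZE is smaller than a page, thread_info/stack blocks
 * can't sensibly come from the page allocator, so the code below
 * carves them out of a dedicated slab cache instead.
 */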
#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */