/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing ? */
	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
		play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
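
/*
 * Illustrative note (added, not from the original source): J_TARGET mirrors
 * how a MIPS j/jal destination is formed - the 26-bit instruction index is
 * shifted left by 2 and pasted into the current 256MB segment given by the
 * top four bits of the jump's own address. For example, a jump at
 * pc 0x80123456 with target field 0x0100000 resolves to
 * (0x80123456 & 0xf0000000) | (0x0100000 << 2) = 0x80400000.
 */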
static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
	} else {
		mmi.halfword[0] = ip->halfword[1];
		mmi.halfword[1] = ip->halfword[0];
		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
			mmi.mm_m_format.rd > 9 &&
			mmi.mm_m_format.base == 29 &&
			mmi.mm_m_format.func == mm_swm32_func) ||
		       (mmi.i_format.opcode == mm_sw32_op &&
			mmi.i_format.rs == 29 &&
			mmi.i_format.rt == 31);
	}
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
#endif
}
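
/*
 * Example (added for clarity, not in the original file): on classic MIPS the
 * prologue store recognised above is typically "sw $ra, 28($sp)" (or sd on
 * 64-bit), i.e. opcode sw_op/sd_op with base register 29 ($sp) and source
 * register 31 ($ra); the offset 28 is just an illustrative value.
 */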
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
		ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;

	return 0;
#endif
}
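
/*
 * Example (added, not from the original source): the stack adjustment this
 * looks for on classic MIPS is the prologue instruction
 * "addiu $sp, $sp, -N" (or daddiu on 64-bit), i.e. rs == rt == 29 with a
 * negative immediate; N is whatever frame size the compiler chose.
 */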
static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
	union mips_instruction *ip = info->func;
#endif
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
				info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;

	/* prologue seems bogus... */
err:
	return -1;
}
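
/*
 * Summary (added, not from the original source): on success the prologue
 * scan above leaves info->frame_size holding the number of bytes the
 * function subtracts from $sp, and info->pc_offset holding the word index
 * within that frame where $ra was stored. The return value is 0 for a
 * nested (non-leaf) function, 1 for a leaf and -1 when the prologue could
 * not be analysed.
 */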
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (!user_mode(regs) && __kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * consider wrongly a nested function as a leaf
		 * one. In those cases, avoid always returning the
		 * value of the ra register.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
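
/*
 * Usage sketch (added, not from the original source): a caller walks a
 * kernel stack by feeding the returned pc back in while *sp and *ra are
 * updated in place; get_wchan() below, for example, keeps calling
 * unwind_stack() until the pc leaves the scheduler functions.
 */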
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8 byte
 * boundary for the 32-bit ABIs and on a 16 byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	smp_call_function(arch_dump_stack, NULL, 1);
}
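
/*
 * Note (added, not from the original source): mips_get_process_fp_mode()
 * and mips_set_process_fp_mode() below back the PR_GET_FP_MODE and
 * PR_SET_FP_MODE prctl(2) operations, which let a process query or switch
 * its floating-point register mode (FR) and the hybrid FRE layout at run
 * time, e.g. prctl(PR_SET_FP_MODE, PR_FP_MODE_FR).
 */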
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
static void prepare_for_fp_mode_switch(void *info)
{
	struct mm_struct *mm = info;

	if (current->mm == mm)
		lose_fpu(1);
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	int max_users;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Proceed with the mode switch */
	preempt_disable();

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then force any which are running
	 * threads in this process to lose their FPU context, which they can't
	 * regain until fp_mode_switching is cleared later.
	 */
	if (num_online_cpus() > 1) {
		/* No need to send an IPI for the local CPU */
		max_users = (task->mm == current->mm) ? 1 : 0;

		if (atomic_read(&current->mm->mm_users) > max_users)
			smp_call_function(prepare_for_fp_mode_switch,
					  (void *)current->mm, 1);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);