arch/mips/kernel/process.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
void __noreturn cpu_idle(void)
{
        int cpu;

        /* CPU is going idle. */
        cpu = smp_processor_id();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                rcu_idle_enter();
                while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
                        extern void smtc_idle_loop_hook(void);

                        smtc_idle_loop_hook();
#endif

                        if (cpu_wait) {
                                /* Don't trace irqs off for idle */
                                stop_critical_timings();
                                (*cpu_wait)();
                                start_critical_timings();
                        }
                }
#ifdef CONFIG_HOTPLUG_CPU
                if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
                    (system_state == SYSTEM_RUNNING ||
                     system_state == SYSTEM_BOOTING))
                        play_dead();
#endif
                rcu_idle_exit();
                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

asmlinkage void ret_from_fork(void);

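/* Set up the user-space register state for a freshly exec'ed thread. */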
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
        status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
        status |= KU_USER;
        regs->cp0_status = status;
        clear_used_math();
        clear_fpu_owner();
        if (cpu_has_dsp)
                __init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
}

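/* No architecture-specific per-thread state to tear down or reset on MIPS. */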
void exit_thread(void)
{
}

void flush_thread(void)
{
}

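/*
 * Set up the child's kernel stack and thread_struct at fork/clone time so
 * that it resumes execution in ret_from_fork.
 */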
int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
        unsigned long childksp;
        p->set_child_tid = p->clear_child_tid = NULL;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        preempt_disable();

        if (is_fpu_owner())
                save_fp(p);

        if (cpu_has_dsp)
                save_dsp(p);

        preempt_enable();

        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the stack after the struct pt_regs. */
        childksp = (unsigned long) childregs;
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */

        childregs->regs[2] = 0; /* Child gets zero as return value */

        if (childregs->cp0_status & ST0_CU0) {
                childregs->regs[28] = (unsigned long) ti;
                childregs->regs[29] = childksp;
                ti->addr_limit = KERNEL_DS;
        } else {
                childregs->regs[29] = usp;
                ti->addr_limit = USER_DS;
        }
        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC restores TCStatus after Status, and the CU bits
         * are aliased there.
         */
        childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
        clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = regs->regs[7];

        return 0;
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

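/* Convert a struct pt_regs to the ELF general purpose register layout (used for core dumps). */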
void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < EF_R0; i++)
                gp[i] = 0;
        gp[EF_R0] = 0;
        for (i = 1; i <= 31; i++)
                gp[EF_R0 + i] = regs->regs[i];
        gp[EF_R26] = 0;
        gp[EF_R27] = 0;
        gp[EF_LO] = regs->lo;
        gp[EF_HI] = regs->hi;
        gp[EF_CP0_EPC] = regs->cp0_epc;
        gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        gp[EF_CP0_STATUS] = regs->cp0_status;
        gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
        gp[EF_UNUSED0] = 0;
#endif
}

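/* Dump another task's registers and FPU state for the ELF core dumper. */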
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        elf_dump_regs(*regs, task_pt_regs(tsk));
        return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
        memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

/*
 * Create a kernel thread
 */
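/* Entry point of the new thread: call fn(arg) and exit with its return value. */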
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
        do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.regs[4] = (unsigned long) arg;
        regs.regs[5] = (unsigned long) fn;
        regs.cp0_epc = (unsigned long) kernel_thread_helper;
        regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                          ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
        regs.cp0_status |= ST0_EXL;
#endif

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Per-function frame information gathered by scanning the function's
 * prologue: the stack frame size it allocates and the offset (in longs)
 * at which it saves the return address register $ra.
 */
struct mips_frame_info {
        void *func;
        unsigned long func_size;
        int frame_size;
        int pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
}

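/* A jal, jalr or jr ends the prologue: stop scanning at the first call or jump. */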
static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
        return 0;
}

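/*
 * Analyse a function's prologue to fill in a mips_frame_info.
 * Returns 0 for a normal (nested) function, 1 for a leaf function that
 * never saves $ra, and -1 if the prologue cannot be parsed.
 */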
static int get_frame_info(struct mips_frame_info *info)
{
        union mips_instruction *ip = info->func;
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;

        info->pc_offset = -1;
        info->frame_size = 0;

        if (!ip)
                goto err;

        if (max_insns == 0)
                max_insns = 128U;       /* unknown function size */
        max_insns = min(128U, max_insns);

        for (i = 0; i < max_insns; i++, ip++) {

                if (is_jal_jalr_jr_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
                                info->frame_size = - ip->i_format.simmediate;
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
                        info->pc_offset =
                                ip->i_format.simmediate / sizeof(long);
                        break;
                }
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}

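/* Cached frame info for schedule(), needed by thread_saved_pc() and get_wchan(). */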
static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;

        kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
        schedule_mfi.func = schedule;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


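/*
 * Kallsyms-based stack unwinding: rather than following frame pointers,
 * each caller's frame is recovered by re-analysing the callee's prologue
 * with get_frame_info().
 */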
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        struct mips_frame_info info;
        unsigned long size, ofs;
        int leaf;
        extern void ret_from_irq(void);
        extern void ret_from_exception(void);

        if (!stack_page)
                return 0;

        /*
         * If we reached the bottom of interrupt context,
         * return saved pc in pt_regs.
         */
        if (pc == (unsigned long)ret_from_irq ||
            pc == (unsigned long)ret_from_exception) {
                struct pt_regs *regs;
                if (*sp >= stack_page &&
                    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
                        regs = (struct pt_regs *)*sp;
                        pc = regs->cp0_epc;
                        if (__kernel_text_address(pc)) {
                                *sp = regs->regs[29];
                                *ra = regs->regs[31];
                                return pc;
                        }
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < stack_page ||
            *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
                return 0;

        if (leaf)
                /*
                 * In some extreme cases, get_frame_info() can
                 * wrongly consider a nested function to be a leaf
                 * one. In those cases, avoid always returning the
                 * same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = (unsigned long)task_stack_page(task);
        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task->state == TASK_RUNNING)
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for 32-bit ABIs and on a 16-byte boundary for 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}