/* MN10300 Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11 #include <linux/module.h>
12 #include <linux/errno.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
16 #include <linux/smp.h>
17 #include <linux/stddef.h>
18 #include <linux/unistd.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/interrupt.h>
22 #include <linux/delay.h>
23 #include <linux/reboot.h>
24 #include <linux/percpu.h>
25 #include <linux/err.h>
27 #include <linux/slab.h>
28 #include <linux/rcupdate.h>
29 #include <asm/uaccess.h>
30 #include <asm/pgtable.h>
32 #include <asm/processor.h>
33 #include <asm/mmu_context.h>
35 #include <asm/reset-regs.h>
36 #include <asm/gdb-stub.h>
40 * return saved PC of a blocked thread.
42 unsigned long thread_saved_pc(struct task_struct
*tsk
)
44 return ((unsigned long *) tsk
->thread
.sp
)[3];
/*
 * power off function, if any (platform code may install one; NULL means
 * no software power-off is available)
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 *
 * NOTE(review): body reconstructed from the upstream MN10300 tree — the
 * scraped copy dropped these lines; verify against the original file.
 */
static void default_idle(void)
{
	/* disable interrupts before the need_resched() test so a wakeup
	 * cannot slip in between the check and the halt */
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		/* advertise that we poll, so no reschedule IPI is needed */
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/* a reschedule was requested while we were clearing the
		 * flag — re-assert it */
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
97 * - there's no useful work to be done, so just try to conserve power and have
98 * a low exit latency (ie sit in a loop waiting for somebody to say that
99 * they'd like to reschedule)
103 /* endless idle loop with no priority at all */
106 while (!need_resched()) {
111 #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
113 #else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
115 #endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
121 schedule_preempt_disabled();
/* nothing to do on MN10300 — no per-mm segment descriptors to release */
void release_segments(struct mm_struct *mm)
{
}
/* restart the machine: detach the debugger first, then pull whichever
 * hard-reset line the unit provides (unit-level reset preferred) */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
/* halt: just tell the debugger we're going away; there is no hardware
 * halt to invoke here */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
/* power off: detach the debugger; no software power-off path is taken
 * here — presumably the platform cuts power externally, TODO confirm */
void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
/* NOTE(review): the scraped copy shows no body statements and the line
 * numbering only leaves room for braces — register dumping is presumably
 * done elsewhere on this arch; confirm against the original file */
void show_regs(struct pt_regs *regs)
{
}
/*
 * free current thread data structures etc..
 *
 * NOTE(review): the single dropped body line is reconstructed as the
 * FPU-state teardown call from the upstream tree — confirm.
 */
void exit_thread(void)
{
	exit_fpu();
}
168 void flush_thread(void)
/* nothing arch-specific to free for a dead task on MN10300 */
void release_thread(struct task_struct *dead_task)
{
}
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}
186 * this gets called so that we can store lazy state into memory and copy the
187 * current task into the new thread.
189 int arch_dup_task_struct(struct task_struct
*dst
, struct task_struct
*src
)
197 * set up the kernel stack for a new thread and copy arch-specific thread
198 * control information
200 int copy_thread(unsigned long clone_flags
,
201 unsigned long c_usp
, unsigned long ustk_size
,
202 struct task_struct
*p
)
204 struct thread_info
*ti
= task_thread_info(p
);
205 struct pt_regs
*c_regs
;
208 c_ksp
= (unsigned long) task_stack_page(p
) + THREAD_SIZE
;
210 /* allocate the userspace exception frame and set it up */
211 c_ksp
-= sizeof(struct pt_regs
);
212 c_regs
= (struct pt_regs
*) c_ksp
;
213 c_ksp
-= 12; /* allocate function call ABI slack */
215 /* set up things up so the scheduler can start the new task */
216 p
->thread
.uregs
= c_regs
;
218 p
->thread
.a3
= (unsigned long) c_regs
;
219 p
->thread
.sp
= c_ksp
;
220 p
->thread
.wchan
= p
->thread
.pc
;
221 p
->thread
.usp
= c_usp
;
223 if (unlikely(p
->flags
& PF_KTHREAD
)) {
224 memset(c_regs
, 0, sizeof(struct pt_regs
));
225 c_regs
->a0
= c_usp
; /* function */
226 c_regs
->d0
= ustk_size
; /* argument */
227 local_save_flags(c_regs
->epsw
);
228 c_regs
->epsw
|= EPSW_IE
| EPSW_IM_7
;
229 p
->thread
.pc
= (unsigned long) ret_from_kernel_thread
;
232 *c_regs
= *current_pt_regs();
235 c_regs
->epsw
&= ~EPSW_FE
; /* my FPU */
237 /* the new TLS pointer is passed in as arg #5 to sys_clone() */
238 if (clone_flags
& CLONE_SETTLS
)
239 c_regs
->e2
= current_frame()->d3
;
241 p
->thread
.pc
= (unsigned long) ret_from_fork
;
246 unsigned long get_wchan(struct task_struct
*p
)
248 return p
->thread
.wchan
;