bfin: switch to generic vfork, get rid of pointless wrappers
[deliverable/linux.git] / arch / microblaze / kernel / process.c
CommitLineData
6496a23a
MS
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/pm.h>
14#include <linux/tick.h>
15#include <linux/bitops.h>
f3268edb 16#include <linux/ptrace.h>
6496a23a 17#include <asm/pgalloc.h>
40db0834 18#include <asm/uaccess.h> /* for USER_DS macros */
a1f55113 19#include <asm/cacheflush.h>
6496a23a
MS
20
/*
 * show_regs() - dump the full MicroBlaze register set to the kernel log.
 *
 * Prints the frame's pt_mode, then r1-r31 and pc four per line, followed
 * by the machine-state and fault registers (msr, ear, esr, fsr), all at
 * KERN_INFO.  Used for oops/debug output.
 */
void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO " Registers dump: mode=%X\r\n", regs->pt_mode);
	printk(KERN_INFO " r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
				regs->r1, regs->r2, regs->r3, regs->r4);
	printk(KERN_INFO " r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
				regs->r5, regs->r6, regs->r7, regs->r8);
	printk(KERN_INFO " r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
				regs->r9, regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO " r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
				regs->r13, regs->r14, regs->r15, regs->r16);
	printk(KERN_INFO " r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
				regs->r17, regs->r18, regs->r19, regs->r20);
	printk(KERN_INFO " r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
				regs->r21, regs->r22, regs->r23, regs->r24);
	printk(KERN_INFO " r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
				regs->r25, regs->r26, regs->r27, regs->r28);
	printk(KERN_INFO " r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
				regs->r29, regs->r30, regs->r31, regs->pc);
	printk(KERN_INFO " msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
				regs->msr, regs->ear, regs->esr, regs->fsr);
}
43
/* Platform idle hook: when non-NULL, cpu_idle() calls it instead of
 * default_idle(). */
void (*pm_idle)(void);
/* Platform power-off hook; NULL when the platform provides none.
 * NOTE(review): the caller is outside this file — presumably the
 * machine power-off path; confirm against the arch reboot code. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
47
/*
 * hlt_counter gates which path default_idle() takes: non-zero selects
 * the busy-relax ("no hlt") path, zero allows cpu_sleep().  Defaults to
 * 1, i.e. sleeping in idle is off unless "hlt" is given on the command
 * line.
 */
static int hlt_counter = 1;

/* Forbid cpu_sleep() in the idle loop.  Calls nest: each disable_hlt()
 * must be balanced by an enable_hlt(). */
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

/* Undo one disable_hlt(); cpu_sleep() becomes possible again once the
 * counter reaches zero. */
void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

/* "nohlt" boot parameter: keep the busy-wait idle path (the default). */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

/* "hlt" boot parameter: allow the idle loop to use cpu_sleep(). */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);
75
/*
 * default_idle() - run one iteration of the idle policy.
 *
 * With hlt_counter non-zero (the default / "nohlt" mode) just relax
 * briefly with interrupts off, bracketing the wait with
 * stop/start_critical_timings() so the irqsoff tracer ignores it.
 *
 * Otherwise clear TIF_POLLING_NRFLAG (with a barrier after the bit op,
 * so wakers observe that this CPU must be interrupted rather than
 * polled), then cpu_sleep() until work is pending and restore the
 * polling flag.
 */
void default_idle(void)
{
	if (likely(hlt_counter)) {
		local_irq_disable();
		stop_critical_timings();
		cpu_relax();
		start_critical_timings();
		local_irq_enable();
	} else {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		local_irq_disable();
		/* Sleep with irqs disabled; cpu_sleep() is expected to
		 * wake on interrupt — hence the re-check of
		 * need_resched() each iteration. */
		while (!need_resched())
			cpu_sleep();
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	}
}
94
/*
 * cpu_idle() - the architecture idle loop; never returns.
 *
 * Marks the CPU as a polling idler, then forever: picks the platform
 * idle routine (pm_idle) or default_idle as a fallback, tells the nohz
 * tick and RCU that we are going idle, idles until need_resched(),
 * undoes the idle notifications in reverse order, yields to the
 * scheduler, and trims the page-table cache.
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		/* Re-read pm_idle each pass so a late-registered
		 * platform hook takes effect. */
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched())
			idle();
		/* Exit notifications strictly mirror the enter order. */
		rcu_idle_exit();
		tick_nohz_idle_exit();

		schedule_preempt_disabled();
		check_pgt_cache();
	}
}
117
/*
 * flush_thread() - reset per-thread arch state at exec time.
 * Intentionally a no-op on this port: nothing beyond pt_regs is kept
 * per thread that would need resetting here.
 */
void flush_thread(void)
{
}
121
/*
 * copy_thread() - set up the child's register frame and kernel context
 * for fork/clone/kernel_thread.
 *
 * @clone_flags: clone(2) flags; CLONE_SETTLS is honoured below.
 * @usp:         new user stack pointer, or — for kernel threads — the
 *               thread function to run.
 * @arg:         argument for the kernel-thread function.
 * @p:           the freshly created task.
 * @unused:      legacy parameter, not used.
 *
 * Kernel threads get a zeroed frame and resume in ret_from_kernel_thread
 * with fn/arg parked in callee-saved r20/r19; user threads get a copy of
 * the parent's user frame (optionally with a new stack pointer) and
 * resume through ret_from_fork.  Always returns 0.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg,
		struct task_struct *p, struct pt_regs *unused)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct thread_info *ti = task_thread_info(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* if we're creating a new kernel thread then just zeroing all
		 * the registers. That's OK for a brand new thread.*/
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
		ti->cpu_context.r1  = (unsigned long)childregs;
		ti->cpu_context.r20 = (unsigned long)usp; /* fn */
		ti->cpu_context.r19 = (unsigned long)arg;
		/* pt_mode = 1 marks a kernel-mode frame (user frames get
		 * 0 in start_thread()). */
		childregs->pt_mode = 1;
		local_save_flags(childregs->msr);
#ifdef CONFIG_MMU
		ti->cpu_context.msr = childregs->msr & ~MSR_IE;
#endif
		/* NOTE(review): the -8 bias on the saved link register
		 * matches the MicroBlaze "rtsd r15, 8" return convention
		 * — confirm against the resume()/entry code. */
		ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
		return 0;
	}
	/* User thread: start from a copy of the parent's current user
	 * frame, overriding the stack pointer when the caller gave one. */
	*childregs = *current_pt_regs();
	if (usp)
		childregs->r1 = usp;

	memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
	ti->cpu_context.r1 = (unsigned long)childregs;
#ifndef CONFIG_MMU
	ti->cpu_context.msr = (unsigned long)childregs->msr;
#else
	childregs->msr |= MSR_UMS;

	/* we should consider the fact that childregs is a copy of the parent
	 * regs which were saved immediately after entering the kernel state
	 * before enabling VM. This MSR will be restored in switch_to and
	 * RETURN() and we want to have the right machine state there
	 * specifically this state must have INTs disabled before and enabled
	 * after performing rtbd
	 * compose the right MSR for RETURN(). It will work for switch_to also
	 * excepting for VM and UMS
	 * don't touch UMS , CARRY and cache bits
	 * right now MSR is a copy of parent one */
	childregs->msr |= MSR_BIP;
	childregs->msr &= ~MSR_EIP;
	childregs->msr |= MSR_IE;
	childregs->msr &= ~MSR_VM;
	childregs->msr |= MSR_VMS;
	childregs->msr |= MSR_EE; /* exceptions will be enabled*/

	ti->cpu_context.msr = (childregs->msr|MSR_VM);
	ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
	ti->cpu_context.msr &= ~MSR_IE;
#endif
	/* Same rtsd-style -8 bias as the kernel-thread path above. */
	ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;

	/*
	 * r21 is the thread reg, r10 is 6th arg to clone
	 * which contains TLS area
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->r21 = childregs->r10;

	return 0;
}
188
5233806d 189#ifndef CONFIG_MMU
6496a23a
MS
190/*
191 * Return saved PC of a blocked thread.
192 */
193unsigned long thread_saved_pc(struct task_struct *tsk)
194{
195 struct cpu_context *ctx =
196 &(((struct thread_info *)(tsk->stack))->cpu_context);
197
198 /* Check whether the thread is blocked in resume() */
199 if (in_sched_functions(ctx->r15))
200 return (unsigned long)ctx->r15;
201 else
202 return ctx->r14;
203}
5233806d 204#endif
6496a23a 205
6496a23a
MS
/*
 * get_wchan() - report where a blocked task is sleeping (used by
 * procfs).  Not implemented on this port: always returns 0, meaning
 * "unknown".
 */
unsigned long get_wchan(struct task_struct *p)
{
/* TBD (used by procfs) */
	return 0;
}
e1c4bd08
MS
211
/*
 * start_thread() - point a task's register frame at a fresh program.
 *
 * Called at exec time: sets the entry pc and the initial user stack
 * pointer, and marks the frame as user mode (pt_mode = 0; compare the
 * kernel-thread path in copy_thread(), which sets 1).  With an MMU,
 * additionally sets MSR_UMS and clears MSR_VM so the eventual return to
 * the task lands in user mode — see the MSR discussion in copy_thread().
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->pc = pc;
	regs->r1 = usp;
	regs->pt_mode = 0;
#ifdef CONFIG_MMU
	regs->msr |= MSR_UMS;
	regs->msr &= ~MSR_VM;
#endif
}
5233806d
MS
223
#ifdef CONFIG_MMU
#include <linux/elfcore.h>
/*
 * dump_fpu() - collect FP registers for an ELF core dump.
 *
 * MicroBlaze has no separate FPU register file, so there is no FP state
 * to record; returning 0 indicates that to the core-dump code.
 * (Previous comment here was copy-pasted from start_thread() and wrong.)
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	return 0; /* MicroBlaze has no separate FPU registers */
}
#endif /* CONFIG_MMU */
This page took 0.211325 seconds and 5 git commands to generate.