/* Merge branch 'upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/linville... */
/* [deliverable/linux.git] / arch / frv / kernel / process.c */
1 /* process.c: FRV specific parts of process handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/process.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13 #include <linux/module.h>
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/stddef.h>
21 #include <linux/unistd.h>
22 #include <linux/ptrace.h>
23 #include <linux/slab.h>
24 #include <linux/user.h>
25 #include <linux/elf.h>
26 #include <linux/reboot.h>
27 #include <linux/interrupt.h>
28
29 #include <asm/asm-offsets.h>
30 #include <asm/uaccess.h>
31 #include <asm/system.h>
32 #include <asm/setup.h>
33 #include <asm/pgtable.h>
34 #include <asm/gdb-stub.h>
35 #include <asm/mb-regs.h>
36
37 #include "local.h"
38
39 asmlinkage void ret_from_fork(void);
40
41 #include <asm/pgalloc.h>
42
43 void (*pm_power_off)(void);
44 EXPORT_SYMBOL(pm_power_off);
45
46 struct task_struct *alloc_task_struct(void)
47 {
48 struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
49 if (p)
50 atomic_set((atomic_t *)(p+1), 1);
51 return p;
52 }
53
54 void free_task_struct(struct task_struct *p)
55 {
56 if (atomic_dec_and_test((atomic_t *)(p+1)))
57 kfree(p);
58 }
59
/*
 * Default idle implementation: put the CPU core to sleep until the next
 * interrupt wakes it.
 */
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	/* full barrier so the idle loop re-reads state after waking */
	mb();
}

/* hook through which cpu_idle() sleeps; defaults to the core-sleep variant */
void (*idle)(void) = core_sleep_idle;
75
76 /*
77 * The idle thread. There's no useful work to be
78 * done, so just try to conserve power and have a
79 * low exit latency (ie sit in a loop waiting for
80 * somebody to say that they'd like to reschedule)
81 */
/*
 * The idle thread.  There's no useful work to be done, so just try to
 * conserve power and have a low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 *
 * Runs with preemption disabled; the enable/schedule/disable sequence at
 * the bottom is the standard hand-off when a reschedule is requested.
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			/* record when this CPU last entered the idle loop */
			irq_stat[cpu].idle_timestamp = jiffies;

			/* NOTE(review): sleeping is skipped while a DMA
			 * transfer is in flight — presumably core sleep would
			 * disturb it; confirm against frv DMA code */
			if (!frv_dma_inprogress && idle)
				idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
100
/*
 * Reboot the machine by writing to the CPU's software-reset register.
 *
 * @__unused: restart command string from userspace; ignored here.
 */
void machine_restart(char * __unused)
{
	unsigned long reset_addr;
#ifdef CONFIG_GDBSTUB
	/* let an attached debug stub know we're going away */
	gdbstub_exit(0);
#endif

	/* the reset register lives at a different address on the FR551 */
	if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
		reset_addr = 0xfefff500;
	else
		reset_addr = 0xfeff0500;

	/* Software reset: flush the dcache, barrier, then store 1 to the
	 * reset register; the trailing nops pad out the pipeline */
	asm volatile("	dcef @(gr0,gr0),1 ! membar !"
		     "	sti %1,@(%0,0) !"
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     : : "r" (reset_addr), "r" (1) );

	/* should not get here; spin until the reset takes effect */
	for (;;)
		;
}
125
/*
 * Halt the machine: notify any attached debug stub, then spin forever.
 */
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;)
		;
}
134
/*
 * Power the machine off.  There is no power-control hardware to poke here,
 * so after telling any attached debug stub, just spin.
 */
void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	while (1)
		;
}
143
144 void flush_thread(void)
145 {
146 #if 0 //ndef NO_FPU
147 unsigned long zero = 0;
148 #endif
149 set_fs(USER_DS);
150 }
151
152 inline unsigned long user_stack(const struct pt_regs *regs)
153 {
154 while (regs->next_frame)
155 regs = regs->next_frame;
156 return user_mode(regs) ? regs->sp : 0;
157 }
158
/*
 * fork() system call: duplicate the calling process.
 *
 * Rejected outright on no-MMU configurations, where a true fork cannot be
 * implemented.
 */
asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
	/* fork almost works, enough to trick you into looking elsewhere:-( */
	return -EINVAL;
#else
	return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}
168
/*
 * vfork() system call: create a child sharing the parent's address space
 * (CLONE_VM); CLONE_VFORK suspends the parent until the child execs or
 * exits.
 */
asmlinkage int sys_vfork(void)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
		       NULL, NULL);
}
174
/*****************************************************************************/
/*
 * clone a process
 * - tlsptr is retrieved by copy_thread() (from the saved gr12 in the frame)
 * - a zero newsp means "continue on the caller's current stack"
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 int __user *parent_tidptr, int __user *child_tidptr,
			 int __user *tlsptr)
{
	if (!newsp)
		newsp = user_stack(__frame);
	return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
} /* end sys_clone() */
188
/*****************************************************************************/
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.  Nothing to do on FRV; the FPU state would be
 * unlazied here if lazy FPU switching were in effect (see commented call).
 */
void prepare_to_copy(struct task_struct *tsk)
{
	//unlazy_fpu(tsk);
} /* end prepare_to_copy() */
198
/*****************************************************************************/
/*
 * set up the kernel stack and exception frames for a new process
 *
 * The child gets a copy of the parent's userspace exception frame at the
 * top of its kernel stack; when invoked via kernel_thread() an extra
 * kernel frame is built below it for the thread to return through.
 * Returns 0 on success.
 */
int copy_thread(int nr, unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs0, *childregs, *regs0;

	regs0 = __kernel_frame0_ptr;
	/* the userspace frame sits at the very top of the child's stack */
	childregs0 = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
	childregs = childregs0;

	/* set up the userspace frame (the only place that the USP is stored) */
	*childregs0 = *regs0;

	/* gr8 = 0: the child sees zero returned from fork/clone
	 * (NOTE(review): gr8 presumably holds the syscall return value —
	 * confirm against the FRV syscall ABI) */
	childregs0->gr8 = 0;
	childregs0->sp = usp;
	childregs0->next_frame = NULL;

	/* set up the return kernel frame if called from kernel_thread() */
	if (regs != regs0) {
		childregs--;
		*childregs = *regs;
		childregs->sp = (unsigned long) childregs0;
		childregs->next_frame = childregs0;
		childregs->gr15 = (unsigned long) task_thread_info(p);
		childregs->gr29 = (unsigned long) p;
	}

	p->set_child_tid = p->clear_child_tid = NULL;

	/* have the child resume in ret_from_fork on the frame built above */
	p->thread.frame = childregs;
	p->thread.curr = p;
	p->thread.sp = (unsigned long) childregs;
	p->thread.fp = 0;
	p->thread.lr = 0;
	p->thread.pc = (unsigned long) ret_from_fork;
	p->thread.frame0 = childregs0;

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		childregs->gr29 = childregs->gr12;

	save_user_regs(p->thread.user);

	return 0;
} /* end copy_thread() */
249
/*
 * sys_execve() executes a new program.
 *
 * Copies the pathname in from userspace (getname/putname manage the
 * kernel-side buffer) and hands off to do_execve() with the current
 * exception frame.  Returns 0 on success or a negative errno.
 */
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
	int error;
	char * filename;

	lock_kernel();
	filename = getname(name);
	/* PTR_ERR is harmless on a valid pointer; only used after IS_ERR */
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, __frame);
	putname(filename);
out:
	unlock_kernel();
	return error;
}
269
/*
 * get_wchan - find where in the kernel a sleeping task is blocked
 * @p: the task to examine (must not be current or runnable)
 *
 * Walks the saved frame-pointer chain on @p's kernel stack, skipping PCs
 * inside scheduler functions, and returns the first PC outside them, or 0
 * if nothing plausible turns up within 16 frames.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct pt_regs *regs0;
	unsigned long fp, pc;
	unsigned long stack_limit;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* the kernel stack starts right after the task_struct (see
	 * alloc_task_struct: both live in one THREAD_SIZE allocation) */
	stack_limit = (unsigned long) (p + 1);
	fp = p->thread.fp;
	regs0 = p->thread.frame0;

	do {
		/* bail out if the frame pointer leaves the stack bounds or
		 * is misaligned */
		if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
			return 0;

		/* saved return address lives at fp[2] */
		pc = ((unsigned long *) fp)[2];

		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;

		/* follow the saved frame pointer to the caller's frame */
		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}
298
299 unsigned long thread_saved_pc(struct task_struct *tsk)
300 {
301 /* Check whether the thread is blocked in resume() */
302 if (in_sched_functions(tsk->thread.pc))
303 return ((unsigned long *)tsk->thread.fp)[2];
304 else
305 return tsk->thread.pc;
306 }
307
/*
 * elf_check_arch - check an ELF image is compatible with this CPU
 * @hdr: the ELF header of the proposed binary
 *
 * Rejects binaries built for another machine, for GPR/FPR bank sizes the
 * hardware isn't running with, for the multiply-add extension on CPUs
 * that lack it, and for a specific CPU model other than the one present.
 * Returns 1 if acceptable, 0 if not.
 */
int elf_check_arch(const struct elf32_hdr *hdr)
{
	unsigned long hsr0 = __get_HSR(0);
	unsigned long psr = __get_PSR();

	if (hdr->e_machine != EM_FRV)
		return 0;

	/* binaries using 64 GPRs need the full register bank enabled */
	switch (hdr->e_flags & EF_FRV_GPR_MASK) {
	case EF_FRV_GPR64:
		if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
			return 0;
		/* fall through */
	case EF_FRV_GPR32:
	case 0:
		break;
	default:
		return 0;
	}

	/* likewise for the 64-FPR variant */
	switch (hdr->e_flags & EF_FRV_FPR_MASK) {
	case EF_FRV_FPR64:
		if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
			return 0;
		/* fall through */
	case EF_FRV_FPR32:
	case EF_FRV_FPR_NONE:
	case 0:
		break;
	default:
		return 0;
	}

	/* the multiply-add unit only exists on the FR405 and FR451 */
	if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;

	/* model-specific binaries must match the implementation we're on */
	switch (hdr->e_flags & EF_FRV_CPU_MASK) {
	case EF_FRV_CPU_GENERIC:
		break;
	case EF_FRV_CPU_FR300:
	case EF_FRV_CPU_SIMPLE:
	case EF_FRV_CPU_TOMCAT:
	default:
		return 0;
	case EF_FRV_CPU_FR400:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	case EF_FRV_CPU_FR450:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;
		break;
	case EF_FRV_CPU_FR500:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
			return 0;
		break;
	case EF_FRV_CPU_FR550:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	}

	return 1;
}
375
/*
 * dump_fpu - fill in the FP/media register set for a core dump
 * @regs: the task's current register frame (unused here)
 * @fpregs: buffer to receive the register dump
 *
 * Copies the saved FP/media registers out of the thread's user context.
 * Returns 1 to indicate the register set is valid and should be dumped.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	memcpy(fpregs,
	       &current->thread.user->f,
	       sizeof(current->thread.user->f));
	return 1;
}
/* This page took 0.038068 seconds and 6 git commands to generate. */