Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
[deliverable/linux.git] / arch / frv / kernel / process.c
1 /* process.c: FRV specific parts of process handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/process.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/mm.h>
17 #include <linux/smp.h>
18 #include <linux/smp_lock.h>
19 #include <linux/stddef.h>
20 #include <linux/unistd.h>
21 #include <linux/ptrace.h>
22 #include <linux/slab.h>
23 #include <linux/user.h>
24 #include <linux/elf.h>
25 #include <linux/reboot.h>
26 #include <linux/interrupt.h>
27
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
30 #include <asm/setup.h>
31 #include <asm/pgtable.h>
32 #include <asm/gdb-stub.h>
33 #include <asm/mb-regs.h>
34
35 #include "local.h"
36
37 asmlinkage void ret_from_fork(void);
38
39 #include <asm/pgalloc.h>
40
/*
 * Allocate a combined task_struct + kernel stack region (THREAD_SIZE bytes)
 * and plant a usage count in the word immediately following the task_struct.
 * That count is shared with free_task_struct() below, which only releases
 * the memory when the count drops to zero.
 * Returns NULL on allocation failure.
 */
struct task_struct *alloc_task_struct(void)
{
	struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
	if (p)
		/* initial reference, stored just past the task_struct */
		atomic_set((atomic_t *)(p+1), 1);
	return p;
}
48
/*
 * Drop a reference on a task_struct allocated by alloc_task_struct(),
 * freeing the whole THREAD_SIZE region when the count (kept in the word
 * just past the task_struct) reaches zero.
 */
void free_task_struct(struct task_struct *p)
{
	if (atomic_dec_and_test((atomic_t *)(p+1)))
		kfree(p);
}
54
/*
 * Default idle routine: put the CPU core into its low-power sleep state
 * until the next interrupt wakes it.  Optionally flashes the board LEDs
 * around the sleep for visual debugging.
 */
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	/* memory barrier after waking — presumably to order the wakeup
	 * against subsequent need_resched() checks; TODO confirm */
	mb();
}
68
/* Current idle routine; defaults to putting the core to sleep.  cpu_idle()
 * skips it (busy-spins instead) when it is NULL or a DMA is in progress. */
void (*idle)(void) = core_sleep_idle;
70
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			/* record when this CPU last idled, for the IRQ
			 * accounting code */
			irq_stat[cpu].idle_timestamp = jiffies;

			/* don't invoke the sleep routine while a DMA
			 * transfer is in flight — presumably sleeping would
			 * stall it; TODO confirm against frv_dma code */
			if (!frv_dma_inprogress && idle)
				idle();
		}

		/* reschedule request pending: drop the preempt count without
		 * triggering a reschedule (schedule() is called explicitly
		 * right after), then re-disable preemption before resuming
		 * the idle poll.  This ordering is required by the
		 * scheduler's idle-loop contract — do not reorder. */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
95
/*
 * Reboot the machine by poking the hardware software-reset register.
 * The __unused parameter is the generic restart command string, ignored
 * here.  Does not return.
 */
void machine_restart(char * __unused)
{
	unsigned long reset_addr;
#ifdef CONFIG_GDBSTUB
	/* tell an attached debugger we are going away */
	gdbstub_exit(0);
#endif

	/* the software-reset register lives at a different MMIO address on
	 * the FR551 than on the other implementations */
	if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
		reset_addr = 0xfefff500;
	else
		reset_addr = 0xfeff0500;

	/* Software reset: flush the dcache, fence, store 1 to the reset
	 * register, then pad with nops while the reset takes effect. */
	asm volatile("	dcef @(gr0,gr0),1 ! membar !"
		     "	sti	%1,@(%0,0) !"
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     : : "r" (reset_addr), "r" (1) );

	/* should never get here, but spin just in case */
	for (;;)
		;
}
120
/*
 * Halt the machine: notify any attached debugger, then spin forever —
 * there is no hardware halt operation to invoke.  Does not return.
 */
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	while (1)
		continue;
}
129
/*
 * Power the machine off: notify any attached debugger, then spin forever —
 * no software-controlled power-off facility is available.  Does not return.
 */
void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	while (1)
		continue;
}
138
139 void flush_thread(void)
140 {
141 #if 0 //ndef NO_FPU
142 unsigned long zero = 0;
143 #endif
144 set_fs(USER_DS);
145 }
146
147 inline unsigned long user_stack(const struct pt_regs *regs)
148 {
149 while (regs->next_frame)
150 regs = regs->next_frame;
151 return user_mode(regs) ? regs->sp : 0;
152 }
153
/*
 * fork() system call.  Only supported with an MMU; on no-MMU FRV it is
 * explicitly disabled because address spaces cannot be duplicated.
 */
asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
	/* fork almost works, enough to trick you into looking elsewhere:-( */
	return -EINVAL;
#else
	/* duplicate the current process; child starts on the parent's
	 * current user stack */
	return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}
163
/*
 * vfork() system call: child shares the parent's VM and the parent is
 * suspended (CLONE_VFORK) until the child execs or exits.
 */
asmlinkage int sys_vfork(void)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
		       NULL, NULL);
}
169
170 /*****************************************************************************/
171 /*
172 * clone a process
173 * - tlsptr is retrieved by copy_thread()
174 */
175 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
176 int __user *parent_tidptr, int __user *child_tidptr,
177 int __user *tlsptr)
178 {
179 if (!newsp)
180 newsp = user_stack(__frame);
181 return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
182 } /* end sys_clone() */
183
/*****************************************************************************/
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 * Currently a no-op on FRV (an unlazy_fpu() call is stubbed out —
 * presumably there is no lazy FPU state to flush here; TODO confirm).
 */
void prepare_to_copy(struct task_struct *tsk)
{
	//unlazy_fpu(tsk);
} /* end prepare_to_copy() */
193
/*****************************************************************************/
/*
 * set up the kernel stack and exception frames for a new process
 * - p is the new task; regs is the register frame to copy for it
 * - usp is the child's stack pointer value
 * - nr and topstk are unused here
 * - returns 0 (cannot fail)
 */
int copy_thread(int nr, unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs0, *childregs, *regs0;

	regs0 = __kernel_frame0_ptr;
	/* frame 0 (the userspace context) lives at the very top of the
	 * child's kernel stack page */
	childregs0 = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
	childregs = childregs0;

	/* set up the userspace frame (the only place that the USP is stored) */
	*childregs0 = *regs0;

	/* gr8 is zeroed — presumably the syscall return register, so the
	 * child sees 0 from fork/clone; TODO confirm against entry.S */
	childregs0->gr8 = 0;
	childregs0->sp = usp;
	childregs0->next_frame = NULL;

	/* set up the return kernel frame if called from kernel_thread() */
	if (regs != regs0) {
		/* stack a second pt_regs immediately below frame 0 and
		 * chain it to frame 0 */
		childregs--;
		*childregs = *regs;
		childregs->sp = (unsigned long) childregs0;
		childregs->next_frame = childregs0;
		childregs->gr15 = (unsigned long) task_thread_info(p);
		childregs->gr29 = (unsigned long) p;
	}

	p->set_child_tid = p->clear_child_tid = NULL;

	/* record the context that resume()/ret_from_fork will restore */
	p->thread.frame = childregs;
	p->thread.curr = p;
	p->thread.sp = (unsigned long) childregs;
	p->thread.fp = 0;
	p->thread.lr = 0;
	p->thread.pc = (unsigned long) ret_from_fork;
	p->thread.frame0 = childregs0;

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		childregs->gr29 = childregs->gr12;

	save_user_regs(p->thread.user);

	return 0;
} /* end copy_thread() */
244
/*
 * sys_execve() executes a new program.
 * Copies the pathname in from userspace, runs do_execve() on the current
 * register frame, and returns its error code (or does not return on
 * success).  The whole operation is wrapped in the big kernel lock.
 */
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
	int error;
	char * filename;

	lock_kernel();
	/* getname() copies the path from userspace into a kernel buffer;
	 * it returns an ERR_PTR on failure */
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, __frame);
	putname(filename);
out:
	unlock_kernel();
	return error;
}
264
/*
 * Determine where in the kernel a sleeping task is waiting, for /proc
 * wchan reporting.  Walks the saved frame-pointer chain on the task's
 * kernel stack until it finds a PC outside the scheduler functions.
 * Returns 0 for running tasks, the current task, or if the walk fails.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct pt_regs *regs0;
	unsigned long fp, pc;
	unsigned long stack_limit;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* the kernel stack starts just above the task_struct and ends at
	 * the saved userspace context */
	stack_limit = (unsigned long) (p + 1);
	fp = p->thread.fp;
	regs0 = p->thread.frame0;

	do {
		/* sanity-check the frame pointer: in range and word-aligned */
		if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
			return 0;

		/* slot [2] of the frame appears to hold the return PC —
		 * TODO confirm against the FRV stack frame layout */
		pc = ((unsigned long *) fp)[2];

		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;

		/* slot [0] links to the caller's frame */
		fp = *(unsigned long *) fp;
	} while (count++ < 16);	/* bound the walk in case of a corrupt chain */

	return 0;
}
293
/*
 * Return the PC at which a descheduled task will resume.  If the task is
 * blocked inside the scheduler, report its caller's return address
 * (frame slot [2]) instead of the scheduler-internal PC.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	/* Check whether the thread is blocked in resume() */
	if (in_sched_functions(tsk->thread.pc))
		return ((unsigned long *)tsk->thread.fp)[2];
	else
		return tsk->thread.pc;
}
302
/*
 * Check that an ELF binary is loadable on this CPU.
 * Compares the header's GPR/FPR/media/CPU-model flags against the
 * capabilities reported by the HSR0 and PSR control registers.
 * Returns 1 if compatible, 0 if not.
 */
int elf_check_arch(const struct elf32_hdr *hdr)
{
	unsigned long hsr0 = __get_HSR(0);
	unsigned long psr = __get_PSR();

	if (hdr->e_machine != EM_FRV)
		return 0;

	/* a binary built for 64 GPRs needs the full register file enabled */
	switch (hdr->e_flags & EF_FRV_GPR_MASK) {
	case EF_FRV_GPR64:
		if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
			return 0;
		/* fall through */
	case EF_FRV_GPR32:
	case 0:
		break;
	default:
		return 0;
	}

	/* likewise, 64 FPRs must actually be present */
	switch (hdr->e_flags & EF_FRV_FPR_MASK) {
	case EF_FRV_FPR64:
		if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
			return 0;
		/* fall through */
	case EF_FRV_FPR32:
	case EF_FRV_FPR_NONE:
	case 0:
		break;
	default:
		return 0;
	}

	/* media multiply-add instructions only exist on FR405/FR451 */
	if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;

	/* finally, match the binary's target CPU family against the
	 * implementation field of the PSR */
	switch (hdr->e_flags & EF_FRV_CPU_MASK) {
	case EF_FRV_CPU_GENERIC:
		break;
	case EF_FRV_CPU_FR300:
	case EF_FRV_CPU_SIMPLE:
	case EF_FRV_CPU_TOMCAT:
	default:
		return 0;
	case EF_FRV_CPU_FR400:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	case EF_FRV_CPU_FR450:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;
		break;
	case EF_FRV_CPU_FR500:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
			return 0;
		break;
	case EF_FRV_CPU_FR550:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	}

	return 1;
}
This page took 0.039111 seconds and 6 git commands to generate.