/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>

extern void ret_from_fork(void);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

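/*
 * Lazy coprocessor context handling, briefly: coprocessor_owner[] (maintained
 * by the coprocessor exception handlers) records which thread's state
 * currently lives in each coprocessor.  coprocessor_flush() writes that live
 * state back into the owner's save area; "releasing" merely drops ownership
 * and clears the corresponding CPENABLE bit, so the next access traps and
 * reloads the state on demand.
 */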
#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	/* Make sure we don't switch tasks during this operation. */

	preempt_disable();

	/* Walk through all coprocessors and release any owned by this thread. */

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	coprocessor_clear_cpenable();

	preempt_enable();
}

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	preempt_disable();

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}

	preempt_enable();
}

#endif


/*
 * Power management idle function, if any is provided by the platform.
 */
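/*
 * Note: rcu_idle_enter()/rcu_idle_exit() mark this CPU as quiescent for the
 * whole time it sits in the inner idle loop, so RCU grace periods need not
 * wait for it.  platform_idle() is the platform hook that actually waits for
 * the next interrupt (typically a "waiti" instruction on xtensa).
 */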

void cpu_idle(void)
{
	local_irq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();
		while (!need_resched())
			platform_idle();
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
}

/*
 * This is called when the thread calls exit().
 */
void exit_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(current_thread_info());
#endif
}

/*
 * Flush thread state. This is called when a thread does an execve().
 * Note that we flush coprocessor registers in case the execve fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
#endif
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
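/*
 * Flushing first matters: any coprocessor state still live in the hardware
 * belongs to the parent (src), so it is written back to the parent's save
 * areas before the task data is duplicated; the child then starts from a
 * coherent snapshot rather than from stale memory contents.
 */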
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(src));
#endif
	*dst = *src;
	return 0;
}

/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+ <- sp in childregs (= tos)
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|       dummy-frame      |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 *
 * The fun part: if we're keeping the same VM (i.e. cloning a thread,
 * not an entire process), we're normally given a new usp, and we CANNOT share
 * any live address register windows.  If we just copy those live frames over,
 * the two threads (parent and child) will overflow the same frames onto the
 * parent stack at different times, likely corrupting the parent stack (esp.
 * if the parent returns from functions that called clone() and calls new
 * ones, before the child overflows its now old copies of its parent windows).
 * One solution is to spill windows to the parent stack, but that's fairly
 * involved.  Much simpler to just not copy those live frames across.
 */
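/*
 * How the child actually starts running (illustrative sketch; the macros
 * live in <asm/processor.h> and look roughly like):
 *
 *   #define MAKE_RA_FOR_CALL(ra, ws)  (((ra) & 0x3fffffff) | ((ws) << 30))
 *   #define MAKE_PC_FROM_RA(ra, sp)   (((ra) & 0x3fffffff) | \
 *                                      ((unsigned long)(sp) & 0xc0000000))
 *
 * A windowed call encodes the window increment in the two top bits of a0, so
 * thread.ra = MAKE_RA_FOR_CALL(ret_from_fork, 0x1) fakes a call4 return into
 * ret_from_fork.  The scheduler's _switch_to() loads thread.ra/thread.sp for
 * the incoming task, so the child's first "return" lands in ret_from_fork
 * with a1 = childregs.  The two stores just below childregs fill the dummy
 * frame's 16-byte base save area (a0 slot = 0, a1 slot = childregs), which is
 * what a stack walker such as get_wchan() below reads as the next return
 * address and stack pointer, so the child's call chain terminates cleanly.
 */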

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	struct thread_info *ti;
	unsigned long tos;
	int user_mode = user_mode(regs);

	/* Set up new TSS. */
	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	if (user_mode)
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	/* This does not copy all the regs. In a bout of brilliance or madness,
	   ARs beyond a0-a15 exist past the end of the struct. */
	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

	childregs->areg[2] = 0;
	p->set_child_tid = p->clear_child_tid = NULL;
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;

	if (user_mode(regs)) {

		childregs->areg[1] = usp;
		if (clone_flags & CLONE_VM) {
			childregs->wmask = 1;	/* can't share live windows */
		} else {
			int len = childregs->wmask & ~0xf;
			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		}
		// FIXME: we need to set THREADPTR in thread_info...
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];

	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
		childregs->areg[1] = tos;
	}

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	return 0;
}


/*
 * These bracket the sleeping functions..
 */
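/*
 * Unwinding sketch for the windowed ABI (by the time a task sleeps, its live
 * windows have been spilled to its stack): the 16 bytes immediately below a
 * frame's stack pointer form the base save area holding the caller's a0..a3,
 * as written by the window overflow handlers.  So from a frame's sp the next
 * return address sits at sp - 16 bytes and the caller's stack pointer at
 * sp - 12 bytes, which is what the loop below reads ("sp-4: ra, sp-3: sp'"
 * counts in 32-bit words).  MAKE_PC_FROM_RA() then rebuilds a full PC by
 * replacing the two top bits (the window increment encoded by the call) with
 * those of the stack pointer.
 */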

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

	do {
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Stack layout: sp-4: ra, sp-3: sp' */

		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
		sp = *((unsigned long *)sp - 3);
	} while (count++ < 16);
	return 0;
}

/*
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers.  Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not.  Exception handling (primarily) uses
 * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
 */
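/*
 * Two details of the conversion below, made explicit (illustrative; WSBITS is
 * the number of WINDOWSTART bits, e.g. 8 on a 32-register configuration):
 *
 *  - windowstart is rotated right by windowbase so that bit 0 always refers
 *    to the current window.  For example, with WSBITS = 8, wb = 3 and
 *    ws = 0b00101100, the rotation yields 0b10000101, a position-independent
 *    layout suitable for a core file.
 *
 *  - wmask describes which address registers in pt_regs are live: the low
 *    bits say how many registers of the current window (a0..a15) are valid
 *    (4, 8, 12 or 16), and wmask >> 4 counts the 4-register caller frames
 *    saved at the high end of areg[]; the "live"/"last" arithmetic below
 *    copies exactly those two regions.
 */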

void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
	unsigned long wb, ws, wm;
	int live, last;

	wb = regs->windowbase;
	ws = regs->windowstart;
	wm = regs->wmask;
	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);

	/* Don't leak any random bits. */

	memset(elfregs, 0, sizeof(*elfregs));

	/* Note: PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc = regs->pc;
	elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
	elfregs->lbeg = regs->lbeg;
	elfregs->lend = regs->lend;
	elfregs->lcount = regs->lcount;
	elfregs->sar = regs->sar;
	elfregs->windowstart = ws;

	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
	memcpy(elfregs->a, regs->areg, live * 4);
	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}

int dump_fpu(void)
{
	return 0;
}

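/*
 * Syscall entry stubs.  The otherwise unused trailing parameters (a5 here,
 * a3..a5 in xtensa_execve below) only pad the C prototypes so that the final
 * struct pt_regs * argument supplied by the syscall dispatch code lands in
 * the expected slot.  A zero newsp means the child keeps using the parent's
 * current user stack pointer (regs->areg[1]).
 */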
asmlinkage
long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
		  void __user *parent_tid, void *child_tls,
		  void __user *child_tid, long a5,
		  struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->areg[1];
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * xtensa_execve() executes a new program.
 */

asmlinkage
long xtensa_execve(const char __user *name,
		   const char __user *const __user *argv,
		   const char __user *const __user *envp,
		   long a3, long a4, long a5,
		   struct pt_regs *regs)
{
	long error;
	struct filename *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename->name, argv, envp, regs);
	putname(filename);
out:
	return error;
}