/* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
 * arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/init.h>

#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/unistd.h>

/* #define VERBOSE_SHOWREGS */

/*
 * Nothing special yet...
 */
void default_idle(void)
{
}

#ifndef CONFIG_SMP

/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        for (;;) {
                /* If current->work.need_resched is zero we should really
                 * set up for a system wakeup event and execute a shutdown
                 * instruction.
                 *
                 * But this requires writing back the contents of the
                 * L2 cache etc. so implement this later. -DaveM
                 */
                while (!need_resched())
                        barrier();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                check_pgt_cache();
        }
}

#else

/*
 * the idle loop on a UltraMultiPenguin...
 *
 * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
 * inside of the idler task, so an interrupt is not needed
 * to get a clean fast response.
 *
 * XXX Reverify this assumption... -DaveM
 *
 * Addendum: We do want it to do something for the signal
 *           delivery case, we detect that by just seeing
 *           if we are trying to send this to an idler or not.
 */
void cpu_idle(void)
{
        cpuinfo_sparc *cpuinfo = &local_cpu_data();
        set_thread_flag(TIF_POLLING_NRFLAG);

        while(1) {
                if (need_resched()) {
                        cpuinfo->idle_volume = 0;
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                        check_pgt_cache();
                }
                cpuinfo->idle_volume++;

                /* The store ordering is so that IRQ handlers on
                 * other cpus see our increasing idleness for the buddy
                 * redistribution algorithm. -DaveM
                 */
                membar_storeload_storestore();
        }
}

#endif

extern char reboot_command[];

extern void (*prom_palette)(int);
extern void (*prom_keyboard)(void);

void machine_halt(void)
{
        if (!serial_console && prom_palette)
                prom_palette(1);
        if (prom_keyboard)
                prom_keyboard();
        prom_halt();
        panic("Halt failed!");
}

void machine_alt_power_off(void)
{
        if (!serial_console && prom_palette)
                prom_palette(1);
        if (prom_keyboard)
                prom_keyboard();
        prom_halt_power_off();
        panic("Power-off failed!");
}

void machine_restart(char *cmd)
{
        char *p;

        p = strchr(reboot_command, '\n');
        if (p)
                *p = 0;
        if (!serial_console && prom_palette)
                prom_palette(1);
        if (prom_keyboard)
                prom_keyboard();
        if (cmd)
                prom_reboot(cmd);
        if (*reboot_command)
                prom_reboot(reboot_command);
        prom_reboot("");
        panic("Reboot failed!");
}

static void show_regwindow32(struct pt_regs *regs)
{
        struct reg_window32 __user *rw;
        struct reg_window32 r_w;
        mm_segment_t old_fs;

        __asm__ __volatile__ ("flushw");
        rw = compat_ptr((unsigned)regs->u_regs[14]);
        old_fs = get_fs();
        set_fs(USER_DS);
        if (copy_from_user(&r_w, rw, sizeof(r_w))) {
                set_fs(old_fs);
                return;
        }

        set_fs(old_fs);
        printk("l0: %08x l1: %08x l2: %08x l3: %08x "
               "l4: %08x l5: %08x l6: %08x l7: %08x\n",
               r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
               r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
        printk("i0: %08x i1: %08x i2: %08x i3: %08x "
               "i4: %08x i5: %08x i6: %08x i7: %08x\n",
               r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
               r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}

static void show_regwindow(struct pt_regs *regs)
{
        struct reg_window __user *rw;
        struct reg_window *rwk;
        struct reg_window r_w;
        mm_segment_t old_fs;

        if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
                __asm__ __volatile__ ("flushw");
                rw = (struct reg_window __user *)
                        (regs->u_regs[14] + STACK_BIAS);
                rwk = (struct reg_window *)
                        (regs->u_regs[14] + STACK_BIAS);
                if (!(regs->tstate & TSTATE_PRIV)) {
                        old_fs = get_fs();
                        set_fs(USER_DS);
                        if (copy_from_user(&r_w, rw, sizeof(r_w))) {
                                set_fs(old_fs);
                                return;
                        }
                        rwk = &r_w;
                        set_fs(old_fs);
                }
        } else {
                show_regwindow32(regs);
                return;
        }
        printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
               rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
        printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
               rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
        printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
               rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
        printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
               rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
        if (regs->tstate & TSTATE_PRIV)
                print_symbol("I7: <%s>\n", rwk->ins[7]);
}

void show_stackframe(struct sparc_stackf *sf)
{
        unsigned long size;
        unsigned long *stk;
        int i;

        printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
               "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
               sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
               sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
        printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
               "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
               sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
               sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
        printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
               "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
               (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
               sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
               sf->xxargs[0]);
        size = ((unsigned long)sf->fp) - ((unsigned long)sf);
        size -= STACKFRAME_SZ;
        stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
        i = 0;
        do {
                printk("s%d: %016lx\n", i++, *stk++);
        } while ((size -= sizeof(unsigned long)));
}

void show_stackframe32(struct sparc_stackf32 *sf)
{
        unsigned long size;
        unsigned *stk;
        int i;

        printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
               sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
        printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
               sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
        printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
               sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
        printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
               sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
        printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
               "x3: %08x x4: %08x x5: %08x xx: %08x\n",
               sf->structptr, sf->xargs[0], sf->xargs[1],
               sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
               sf->xxargs[0]);
        size = ((unsigned long)sf->fp) - ((unsigned long)sf);
        size -= STACKFRAME32_SZ;
        stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
        i = 0;
        do {
                printk("s%d: %08x\n", i++, *stk++);
        } while ((size -= sizeof(unsigned)));
}

#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(regdump_lock);
#endif

void __show_regs(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
        unsigned long flags;

        /* Protect against xcall ipis which might lead to livelock on the lock */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %1, %%pstate"
                             : "=r" (flags)
                             : "i" (PSTATE_IE));
        spin_lock(&regdump_lock);
#endif
        printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
               regs->tpc, regs->tnpc, regs->y, print_tainted());
        print_symbol("TPC: <%s>\n", regs->tpc);
        printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
        printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
               regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
               regs->u_regs[7]);
        printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
               regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
               regs->u_regs[11]);
        printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
        print_symbol("RPC: <%s>\n", regs->u_regs[15]);
        show_regwindow(regs);
#ifdef CONFIG_SMP
        spin_unlock(&regdump_lock);
        __asm__ __volatile__("wrpr %0, 0, %%pstate"
                             : : "r" (flags));
#endif
}

#ifdef VERBOSE_SHOWREGS
static void idump_from_user(unsigned int *pc)
{
        int i;
        int code;

        if ((((unsigned long) pc) & 3))
                return;

        pc -= 3;
        for (i = -3; i < 6; i++) {
                get_user(code, pc);
                printk("%c%08x%c", i ? ' ' : '<', code, i ? ' ' : '>');
                pc++;
        }
        printk("\n");
}
#endif

void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
        extern long etrap, etraptl1;
#endif
        __show_regs(regs);
#ifdef CONFIG_SMP
        {
                extern void smp_report_regs(void);

                smp_report_regs();
        }
#endif

#ifdef VERBOSE_SHOWREGS
        if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
            regs->u_regs[14] >= (long)current - PAGE_SIZE &&
            regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
                printk("*********parent**********\n");
                __show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
                idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
                printk("*********endpar**********\n");
        }
#endif
}

void show_regs32(struct pt_regs32 *regs)
{
        printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
               regs->pc, regs->npc, regs->y, print_tainted());
        printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
        printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
               regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
               regs->u_regs[7]);
        printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
               regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
               regs->u_regs[11]);
        printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
}

unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_info *ti = tsk->thread_info;
        unsigned long ret = 0xdeadbeefUL;

        if (ti && ti->ksp) {
                unsigned long *sp;
                sp = (unsigned long *)(ti->ksp + STACK_BIAS);
                if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
                    sp[14]) {
                        unsigned long *fp;
                        fp = (unsigned long *)(sp[14] + STACK_BIAS);
                        if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
                                ret = fp[15];
                }
        }
        return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
        struct thread_info *t = current_thread_info();

        if (t->utraps) {
                if (t->utraps[0] < 2)
                        kfree(t->utraps);
                else
                        t->utraps[0]--;
        }

        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }
}

void flush_thread(void)
{
        struct thread_info *t = current_thread_info();

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);

        if (t->task->mm) {
                unsigned long pgd_cache = 0UL;
                if (test_thread_flag(TIF_32BIT)) {
                        struct mm_struct *mm = t->task->mm;
                        pgd_t *pgd0 = &mm->pgd[0];
                        pud_t *pud0 = pud_offset(pgd0, 0);

                        if (pud_none(*pud0)) {
                                pmd_t *page = pmd_alloc_one(mm, 0);
                                pud_set(pud0, page);
                        }
                        pgd_cache = get_pgd_cache(pgd0);
                }
                __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (pgd_cache),
                                       "r" (TSB_REG),
                                       "i" (ASI_DMMU));
        }
        set_thread_wsaved(0);

        /* Turn off performance counters if on. */
        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }

        /* Clear FPU register state. */
        t->fpsaved[0] = 0;

        if (get_thread_current_ds() != ASI_AIUS)
                set_fs(USER_DS);

        /* Init new signal delivery disposition. */
        clear_thread_flag(TIF_NEWSIGNALS);
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
        unsigned long fp, distance, rval;

        if (!(test_thread_flag(TIF_32BIT))) {
                csp += STACK_BIAS;
                psp += STACK_BIAS;
                __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
                fp += STACK_BIAS;
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

        /* Now 8-byte align the stack as this is mandatory in the
         * Sparc ABI due to how register windows work. This hides
         * the restriction from thread libraries etc. -DaveM
         */
        csp &= ~7UL;

        distance = fp - psp;
        rval = (csp - distance);
        if (copy_in_user((void __user *)rval, (void __user *)psp, distance))
                rval = 0;
        else if (test_thread_flag(TIF_32BIT)) {
                if (put_user(((u32)csp),
                             &(((struct reg_window32 __user *)rval)->ins[6])))
                        rval = 0;
        } else {
                if (put_user(((u64)csp - STACK_BIAS),
                             &(((struct reg_window __user *)rval)->ins[6])))
                        rval = 0;
                else
                        rval = rval - STACK_BIAS;
        }

        return rval;
}

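/*
 * Editorial illustration, not part of the original file: a worked example
 * of the relocation arithmetic above for a 64-bit task, using hypothetical
 * biased addresses (STACK_BIAS is the sparc64 stack bias constant):
 *
 *      psp (parent sp, biased)        = 0x1000
 *      fp  (saved %fp in that window) = 0x10c0
 *      distance = fp - psp            = 0xc0 bytes to copy
 *      csp (child sp, biased, &= ~7)  = 0x2000
 *      rval = csp - distance          = 0x1f40
 *
 * The 0xc0 bytes are copied with copy_in_user() from psp to rval, the
 * copied window's saved %fp (ins[6]) is rewritten to csp - STACK_BIAS so
 * the child's register window chain points at its own stack, and the
 * unbiased rval is returned as the child's new frame pointer.
 */
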
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
                                       struct thread_info *t)
{
        int i;

        for (i = first_win; i < last_win; i++) {
                t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
                memcpy(&t->reg_window[i], &t->reg_window[i+1],
                       sizeof(struct reg_window));
        }
}

void synchronize_user_stack(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;

        flush_user_windows();
        if ((window = get_thread_wsaved()) != 0) {
                int winsize = sizeof(struct reg_window);
                int bias = 0;

                if (test_thread_flag(TIF_32BIT))
                        winsize = sizeof(struct reg_window32);
                else
                        bias = STACK_BIAS;

                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (!copy_to_user((char __user *)sp, rwin, winsize)) {
                                shift_window_buffer(window, get_thread_wsaved() - 1, t);
                                set_thread_wsaved(get_thread_wsaved() - 1);
                        }
                } while (window--);
        }
}

void fault_in_user_windows(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;
        int winsize = sizeof(struct reg_window);
        int bias = 0;

        if (test_thread_flag(TIF_32BIT))
                winsize = sizeof(struct reg_window32);
        else
                bias = STACK_BIAS;

        flush_user_windows();
        window = get_thread_wsaved();

        if (window != 0) {
                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (copy_to_user((char __user *)sp, rwin, winsize))
                                goto barf;
                } while (window--);
        }
        set_thread_wsaved(0);
        return;

barf:
        set_thread_wsaved(window + 1);
        do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
                              unsigned long stack_start,
                              struct pt_regs *regs,
                              unsigned long stack_size)
{
        int __user *parent_tid_ptr, *child_tid_ptr;

#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
                child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
        } else
#endif
        {
                parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
                child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
        }

        return do_fork(clone_flags, stack_start,
                       regs, stack_size,
                       parent_tid_ptr, child_tid_ptr);
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's pid, %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
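/*
 * Editorial illustration, not part of the original file: under the
 * convention described above, a hypothetical user-space fork() wrapper
 * would use %o1 to turn the raw trap result into POSIX semantics,
 * roughly like this sketch:
 *
 *      ret = raw_fork_trap();        // %o0 = pid, %o1 = in-child flag
 *      if (o1 != 0)                  // running in the child
 *              ret = 0;              // child sees fork() return 0
 *      // parent keeps ret == child's pid
 */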
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *t = p->thread_info;
        char *child_trap_frame;

        /* Calculate offset to stack_frame & pt_regs */
        child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));

        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
                (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
        t->new_child = 1;
        t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
        t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
        t->fpsaved[0] = 0;

        if (regs->tstate & TSTATE_PRIV) {
                /* Special case, if we are spawning a kernel thread from
                 * a userspace task (via KMOD, NFS, or similar) we must
                 * disable performance counters in the child because the
                 * address space and protection realm are changing.
                 */
                if (t->flags & _TIF_PERFCTR) {
                        t->user_cntd0 = t->user_cntd1 = NULL;
                        t->pcr_reg = 0;
                        t->flags &= ~_TIF_PERFCTR;
                }
                t->kregs->u_regs[UREG_FP] = t->ksp;
                t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
                flush_register_windows();
                memcpy((void *)(t->ksp + STACK_BIAS),
                       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
                       sizeof(struct sparc_stackf));
                t->kregs->u_regs[UREG_G6] = (unsigned long) t;
                t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
        } else {
                if (t->flags & _TIF_32BIT) {
                        sp &= 0x00000000ffffffffUL;
                        regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
                }
                t->kregs->u_regs[UREG_FP] = sp;
                t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
                if (sp != regs->u_regs[UREG_FP]) {
                        unsigned long csp;

                        csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
                        if (!csp)
                                return -EFAULT;
                        t->kregs->u_regs[UREG_FP] = csp;
                }
                if (t->utraps)
                        t->utraps[0]++;
        }

        /* Set the return value for the child. */
        t->kregs->u_regs[UREG_I0] = current->pid;
        t->kregs->u_regs[UREG_I1] = 1;

        /* Set the second return value for the parent. */
        regs->u_regs[UREG_I1] = 0;

        if (clone_flags & CLONE_SETTLS)
                t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

        return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        long retval;

        /* If the parent runs before fn(arg) is called by the child,
         * the input registers of this function can be clobbered.
         * So we stash 'fn' and 'arg' into global registers which
         * will not be modified by the parent.
         */
        __asm__ __volatile__("mov %4, %%g2\n\t"    /* Save FN into global */
                             "mov %5, %%g3\n\t"    /* Save ARG into global */
                             "mov %1, %%g1\n\t"    /* Clone syscall nr. */
                             "mov %2, %%o0\n\t"    /* Clone flags. */
                             "mov 0, %%o1\n\t"     /* usp arg == 0 */
                             "t 0x6d\n\t"          /* Linux/Sparc clone(). */
                             "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
                             " mov %%o0, %0\n\t"
                             "jmpl %%g2, %%o7\n\t" /* Call the function. */
                             " mov %%g3, %%o0\n\t" /* Set arg in delay. */
                             "mov %3, %%g1\n\t"
                             "t 0x6d\n\t"          /* Linux/Sparc exit(). */
                             /* Notreached by child. */
                             "1:" :
                             "=r" (retval) :
                             "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
                             "i" (__NR_exit),  "r" (fn), "r" (arg) :
                             "g1", "g2", "g3", "o0", "o1", "memory", "cc");
        return retval;
}

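/*
 * Editorial usage sketch, not part of the original file: a caller elsewhere
 * in the kernel would typically spawn a kernel thread along these lines,
 * where my_thread_fn and my_arg are hypothetical names:
 *
 *      pid_t pid = kernel_thread(my_thread_fn, my_arg, CLONE_FS | CLONE_FILES);
 *      if (pid < 0)
 *              printk(KERN_ERR "failed to start thread: %d\n", pid);
 *
 * CLONE_VM and CLONE_UNTRACED need not be passed by the caller; the asm
 * above ORs them into the flags unconditionally.
 */
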
/*
 * fill in the user structure for a core dump.
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
        /* Should only be used for SunOS and ancient a.out
         * SparcLinux binaries...  Not worth implementing.
         */
        memset(dump, 0, sizeof(struct user));
}

typedef struct {
        union {
                unsigned int pr_regs[32];
                unsigned long pr_dregs[16];
        } pr_fr;
        unsigned int __unused;
        unsigned int pr_fsr;
        unsigned char pr_qcnt;
        unsigned char pr_q_entrysize;
        unsigned char pr_en;
        unsigned int pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
        unsigned long *kfpregs = current_thread_info()->fpregs;
        unsigned long fprs = current_thread_info()->fpsaved[0];

        if (test_thread_flag(TIF_32BIT)) {
                elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

                if (fprs & FPRS_DL)
                        memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs32->pr_fr.pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                fpregs32->pr_qcnt = 0;
                fpregs32->pr_q_entrysize = 8;
                memset(&fpregs32->pr_q[0], 0,
                       (sizeof(unsigned int) * 64));
                if (fprs & FPRS_FEF) {
                        fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
                        fpregs32->pr_en = 1;
                } else {
                        fpregs32->pr_fsr = 0;
                        fpregs32->pr_en = 0;
                }
        } else {
                if (fprs & FPRS_DL)
                        memcpy(&fpregs->pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_DU)
                        memcpy(&fpregs->pr_regs[16], kfpregs+16,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[16], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_FEF) {
                        fpregs->pr_fsr = current_thread_info()->xfsr[0];
                        fpregs->pr_gsr = current_thread_info()->gsr[0];
                } else {
                        fpregs->pr_fsr = fpregs->pr_gsr = 0;
                }
                fpregs->pr_fprs = fprs;
        }
        return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
        int error, base = 0;
        char *filename;

        /* User register window flush is done by entry.S */

        /* Check for indirect call. */
        if (regs->u_regs[UREG_G1] == 0)
                base = 1;

        filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I1],
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I2], regs);
        putname(filename);
        if (!error) {
                fprs_write(0);
                current_thread_info()->xfsr[0] = 0;
                current_thread_info()->fpsaved[0] = 0;
                regs->tstate &= ~TSTATE_PEF;
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
out:
        return error;
}

unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc, fp, bias = 0;
        unsigned long thread_info_base;
        struct reg_window *rw;
        unsigned long ret = 0;
        int count = 0;

        if (!task || task == current ||
            task->state == TASK_RUNNING)
                goto out;

        thread_info_base = (unsigned long) task->thread_info;
        bias = STACK_BIAS;
        fp = task->thread_info->ksp + bias;

        do {
                /* Bogus frame pointer? */
                if (fp < (thread_info_base + sizeof(struct thread_info)) ||
                    fp >= (thread_info_base + THREAD_SIZE))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                if (!in_sched_functions(pc)) {
                        ret = pc;
                        goto out;
                }
                fp = rw->ins[6] + bias;
        } while (++count < 16);

out:
        return ret;
}