[SPARC64]: More fully work around Spitfire Errata 51.
[deliverable/linux.git] / arch/sparc64/kernel/process.c
/* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
 * arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/init.h>

#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/unistd.h>

/* #define VERBOSE_SHOWREGS */

/*
 * Nothing special yet...
 */
void default_idle(void)
{
}

#ifndef CONFIG_SMP

/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		/* If current->work.need_resched is zero we should really
		 * set up for a system wakeup event and execute a shutdown
		 * instruction.
		 *
		 * But this requires writing back the contents of the
		 * L2 cache etc. so implement this later. -DaveM
		 */
		while (!need_resched())
			barrier();

		schedule();
		check_pgt_cache();
	}
}

#else

/*
 * the idle loop on a UltraMultiPenguin...
 */
#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
	while(1) {
		if (need_resched()) {
			unidle_me();
			clear_thread_flag(TIF_POLLING_NRFLAG);
			schedule();
			set_thread_flag(TIF_POLLING_NRFLAG);
			check_pgt_cache();
		}
		idle_me_harder();

		/* The store ordering is so that IRQ handlers on
		 * other cpus see our increasing idleness for the buddy
		 * redistribution algorithm. -DaveM
		 */
		membar_storeload_storestore();
	}
}

#endif

extern char reboot_command [];

extern void (*prom_palette)(int);
extern void (*prom_keyboard)(void);

void machine_halt(void)
{
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt();
	panic("Halt failed!");
}

void machine_alt_power_off(void)
{
	if (!serial_console && prom_palette)
		prom_palette(1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt_power_off();
	panic("Power-off failed!");
}

void machine_restart(char * cmd)
{
	char *p;

	p = strchr (reboot_command, '\n');
	if (p) *p = 0;
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (prom_keyboard)
		prom_keyboard();
	if (cmd)
		prom_reboot(cmd);
	if (*reboot_command)
		prom_reboot(reboot_command);
	prom_reboot("");
	panic("Reboot failed!");
}

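/* Dump the 32-bit register window that a compat task's stack pointer
 * (%o6) points at, fetching it from userspace.
 */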
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}

static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		print_symbol("I7: <%s>\n", rwk->ins[7]);
}

void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	do {
		printk("s%d: %016lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}

void show_stackframe32(struct sparc_stackf32 *sf)
{
	unsigned long size;
	unsigned *stk;
	int i;

	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
	       sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME32_SZ;
	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
	i = 0;
	do {
		printk("s%d: %08x\n", i++, *stk++);
	} while ((size -= sizeof(unsigned)));
}

#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(regdump_lock);
#endif

void __show_regs(struct pt_regs * regs)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	/* Protect against xcall ipis which might lead to livelock on the lock */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (flags)
			     : "i" (PSTATE_IE));
	spin_lock(&regdump_lock);
#endif
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	print_symbol("TPC: <%s>\n", regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
	show_regwindow(regs);
#ifdef CONFIG_SMP
	spin_unlock(&regdump_lock);
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (flags));
#endif
}

#ifdef VERBOSE_SHOWREGS
static void idump_from_user (unsigned int *pc)
{
	int i;
	int code;

	if((((unsigned long) pc) & 3))
		return;

	pc -= 3;
	for(i = -3; i < 6; i++) {
		get_user(code, pc);
		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
		pc++;
	}
	printk("\n");
}
#endif

void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
	extern long etrap, etraptl1;
#endif
	__show_regs(regs);
#ifdef CONFIG_SMP
	{
		extern void smp_report_regs(void);

		smp_report_regs();
	}
#endif

#ifdef VERBOSE_SHOWREGS
	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
		printk ("*********parent**********\n");
		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
		printk ("*********endpar**********\n");
	}
#endif
}

void show_regs32(struct pt_regs32 *regs)
{
	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
	       regs->pc, regs->npc, regs->y, print_tainted());
	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
}

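/* Return the kernel PC a non-running task saved on its kernel stack:
 * follow the frame pointer stored at the saved stack pointer and read
 * the return address (%i7) of that caller frame, or 0xdeadbeef if the
 * stack looks bogus.
 */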
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = tsk->thread_info;
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

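	/* utraps[0] is a use count shared with threads cloned from us
	 * (copy_thread() increments it); free the table only when the
	 * last user exits.
	 */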
	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}

	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}
}

void flush_thread(void)
{
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);

	if (t->task->mm) {
		unsigned long pgd_cache = 0UL;
		if (test_thread_flag(TIF_32BIT)) {
			struct mm_struct *mm = t->task->mm;
			pgd_t *pgd0 = &mm->pgd[0];
			pud_t *pud0 = pud_offset(pgd0, 0);

			if (pud_none(*pud0)) {
				pmd_t *page = pmd_alloc_one(mm, 0);
				pud_set(pud0, page);
			}
			pgd_cache = get_pgd_cache(pgd0);
		}
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (pgd_cache),
				       "r" (TSB_REG),
				       "i" (ASI_DMMU));
	}
	set_thread_wsaved(0);

	/* Turn off performance counters if on. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;

	if (get_thread_current_ds() != ASI_AIUS)
		set_fs(USER_DS);

	/* Init new signal delivery disposition. */
	clear_thread_flag(TIF_NEWSIGNALS);
}

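/* Copy the parent's topmost stack frame (from %sp up to its saved %fp)
 * onto the new stack given to clone(), and rewrite the saved frame
 * pointer in the copy so it points into the child's stack.  Returns
 * the child's new stack pointer value, or 0 if userspace could not be
 * accessed.
 */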
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	unsigned long fp, distance, rval;

	if (!(test_thread_flag(TIF_32BIT))) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now 8-byte align the stack as this is mandatory in the
	 * Sparc ABI due to how register windows work. This hides
	 * the restriction from thread libraries etc. -DaveM
	 */
	csp &= ~7UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (test_thread_flag(TIF_32BIT)) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
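/* Remove saved user window 'first_win' from the thread's spill buffer
 * by sliding the higher-numbered windows (and their recorded stack
 * pointers) down one slot.
 */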
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

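/* Try to spill any user register windows we are still buffering in
 * thread_info out to the user stack.  Windows whose stack page cannot
 * be written right now simply stay buffered.
 */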
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		if (test_thread_flag(TIF_32BIT))
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

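/* Push every register window we are buffering for the user out to the
 * user stack.  Unlike synchronize_user_stack(), failure to write a
 * window is fatal here: the task is killed with SIGILL.
 */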
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	if (test_thread_flag(TIF_32BIT))
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;

	flush_user_windows();
	window = get_thread_wsaved();

	if (window != 0) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (copy_to_user((char __user *)sp, rwin, winsize))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	return do_fork(clone_flags, stack_start,
		       regs, stack_size,
		       parent_tid_ptr, child_tid_ptr);
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == child's pid, %o1 == 0
 * Child  --> %o0 == parent's pid, %o1 == 1
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *t = p->thread_info;
	char *child_trap_frame;

#ifdef CONFIG_DEBUG_SPINLOCK
	p->thread.smp_lock_count = 0;
	p->thread.smp_lock_pc = 0;
#endif

	/* Calculate offset to stack_frame & pt_regs */
	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));

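	/* Record which register window (CWP) the child will start in
	 * (the one after the parent's at trap time) and clear the saved
	 * address-space field, which is set again below.
	 */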
	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (regs->tstate & TSTATE_PRIV) {
		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (via KMOD, NFS, or similar) we must
		 * disable performance counters in the child because the
		 * address space and protection realm are changing.
		 */
		if (t->flags & _TIF_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~_TIF_PERFCTR;
		}
		t->kregs->u_regs[UREG_FP] = t->ksp;
		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
		flush_register_windows();
		memcpy((void *)(t->ksp + STACK_BIAS),
		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
		       sizeof(struct sparc_stackf));
		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
	} else {
		if (t->flags & _TIF_32BIT) {
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		if (t->utraps)
			t->utraps[0]++;
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	/* If the parent runs before fn(arg) is called by the child,
	 * the input registers of this function can be clobbered.
	 * So we stash 'fn' and 'arg' into global registers which
	 * will not be modified by the parent.
	 */
	__asm__ __volatile__("mov %4, %%g2\n\t"	/* Save FN into global */
			     "mov %5, %%g3\n\t"	/* Save ARG into global */
			     "mov %1, %%g1\n\t"	/* Clone syscall nr. */
			     "mov %2, %%o0\n\t"	/* Clone flags. */
			     "mov 0, %%o1\n\t"	/* usp arg == 0 */
			     "t 0x6d\n\t"	/* Linux/Sparc clone(). */
			     "brz,a,pn %%o1, 1f\n\t"	/* Parent, just return. */
			     " mov %%o0, %0\n\t"
			     "jmpl %%g2, %%o7\n\t"	/* Call the function. */
			     " mov %%g3, %%o0\n\t"	/* Set arg in delay. */
			     "mov %3, %%g1\n\t"
			     "t 0x6d\n\t"	/* Linux/Sparc exit(). */
			     /* Not reached by child. */
			     "1:" :
			     "=r" (retval) :
			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
			     "i" (__NR_exit), "r" (fn), "r" (arg) :
			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}

/*
 * fill in the user structure for a core dump.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	/* Should only be used for SunOS and ancient a.out
	 * SparcLinux binaries...  Not worth implementing.
	 */
	memset(dump, 0, sizeof(struct user));
}

typedef struct {
	union {
		unsigned int pr_regs[32];
		unsigned long pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;
	unsigned char pr_qcnt;
	unsigned char pr_q_entrysize;
	unsigned char pr_en;
	unsigned int pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
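/* Only state the task actually has live gets dumped: fpsaved[0] holds
 * the task's FPRS bits, where FPRS_DL/FPRS_DU mark the lower/upper
 * half of the FP register file as valid and FPRS_FEF marks a valid
 * %fsr (and %gsr).
 */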
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if(fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if (regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I1],
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I2], regs);
	putname(filename);
	if (!error) {
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
out:
	return error;
}

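/* Determine where a sleeping task is waiting in the kernel: walk its
 * saved stack frames (at most 16 of them) and return the first return
 * address that is not within the scheduler.
 */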
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	unsigned long thread_info_base;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	thread_info_base = (unsigned long) task->thread_info;
	bias = STACK_BIAS;
	fp = task->thread_info->ksp + bias;

	do {
		/* Bogus frame pointer? */
		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
		    fp >= (thread_info_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}