Commit | Line | Data |
---|---|---|
867e359b CM |
1 | /* |
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | ||
15 | #include <linux/linkage.h> | |
0707ad30 | 16 | #include <linux/unistd.h> |
867e359b | 17 | #include <asm/irqflags.h> |
a78c942d | 18 | #include <asm/processor.h> |
0707ad30 | 19 | #include <arch/abi.h> |
a78c942d | 20 | #include <arch/spr_def.h> |
867e359b CM |
21 | |
/*
 * TILE-Gx renamed the "branch if nonzero, predict taken" mnemonic from
 * bnzt to bnezt; map the old name so code shared with TILEPro assembles
 * on both ISAs.  (No bnzt appears in this chunk; presumably used later
 * in the file -- confirm against the full source.)
 */
#ifdef __tilegx__
#define bnzt bnezt
#endif
25 | ||
/*
 * void *current_text_addr(void)
 * Return the caller's text address: on entry lr holds the return
 * address, so copy it into r0 (the return-value register) and return,
 * all in a single bundle.
 */
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)
29 | ||
/*
 * Implement execve(). The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be
 * careful not to write to the stack here.
 *
 * Invokes execve from kernel space by issuing the syscall trap
 * directly; arguments are assumed to already be in the syscall
 * argument registers (r0..r2 per the tile ABI -- confirm with callers).
 */
STD_ENTRY(kernel_execve)
	moveli TREG_SYSCALL_NR_NAME, __NR_execve	/* select the execve syscall */
	swint1						/* software interrupt 1: syscall trap */
	jrp lr						/* return syscall result to caller */
	STD_ENDPROC(kernel_execve)
40 | ||
/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process. See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 *
 * __rt_sigreturn_end exists only to delimit the byte range that
 * vdso_setup() copies; do not place code between it and .popsection.
 * Placed in .data (not .text) because it is copied, never executed
 * in place.
 */
	.pushsection .data
ENTRY(__rt_sigreturn)
	moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn	/* select rt_sigreturn */
	swint1						/* trap; kernel restores user context, no return here */
	ENDPROC(__rt_sigreturn)
	ENTRY(__rt_sigreturn_end)			/* end marker for the copy */
	.popsection
56 | ||
/*
 * void dump_stack(void)
 * Capture the caller's context and tail-call the C backend:
 *   r1 = our own entry PC (lnk yields the next-bundle PC; the addli of
 *        "dump_stack - ." rewinds it to the function start, PIC-safely),
 *   r2 = lr (caller's return address), r3 = sp, r4 = r52 (frame pointer
 *        register on tile -- confirm against the ABI headers).
 * _dump_stack never returns here; the trailing jrp exists only so the
 * backtracer sees a well-formed function.
 */
STD_ENTRY(dump_stack)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, dump_stack - . }
	{ move r3, sp; j _dump_stack }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(dump_stack)
63 | ||
/*
 * Same capture pattern as dump_stack above: record our own entry PC in
 * r1 (lnk + PC-relative adjust), caller's lr in r2, sp in r3, and r52
 * in r4, then tail-jump to the C implementation
 * _KBacktraceIterator_init_current.  The jrp after the jump is
 * unreachable and present only to satisfy the backtracer.
 */
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)
70 | ||
/*
 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
 * free the old stack (passed in r0) and re-invoke cpu_idle().
 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
 *
 * In:  r0 = old thread_info to free, r1 = new sp,
 *      r2 = new SPR_SYSTEM_SAVE_K_0 value (ksp0 + cpu).
 * Both updates sit in one bundle so there is no window in which sp and
 * the saved kernel-stack SPR disagree.  Tail-jumps to cpu_idle; never
 * returns.
 */
STD_ENTRY(cpu_idle_on_new_stack)
	{
	move sp, r1
	mtspr SPR_SYSTEM_SAVE_K_0, r2
	}
	jal free_thread_info	/* release the old stack (r0) */
	j cpu_idle		/* tail-call; does not return */
	STD_ENDPROC(cpu_idle_on_new_stack)
84 | ||
/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr /* clue in the backtracer */
	STD_ENDPROC(smp_nap)
91 | ||
/*
 * Enable interrupts racelessly and then nap until interrupted.
 * Architecturally, we are guaranteed that enabling interrupts via
 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 *
 * Sequence: set ICS (so unmasking in IRQ_ENABLE cannot deliver an
 * interrupt yet), unmask, then clear ICS immediately before nap --
 * per the guarantee above, an interrupt can land no earlier than the
 * nap itself, closing the classic "interrupt between test and sleep"
 * race.  r1-r3 are used as scratch.
 */
STD_ENTRY(_cpu_idle)
	movei r1, 1
	mtspr INTERRUPT_CRITICAL_SECTION, r1	/* enter critical section */
	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
	mtspr INTERRUPT_CRITICAL_SECTION, zero	/* leave ICS; next PC is the nap */
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	jrp lr		/* reached via intvec.S's +8 PC bump after an interrupt */
	STD_ENDPROC(_cpu_idle)