Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/kernel/entry-common.S | |
3 | * | |
4 | * Copyright (C) 2000 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/config.h> | |
11 | ||
12 | #include <asm/thread_info.h> | |
13 | #include <asm/ptrace.h> | |
14 | #include <asm/unistd.h> | |
15 | ||
16 | #include "entry-header.S" | |
17 | ||
18 | /* | |
19 | * We rely on the fact that R0 is at the bottom of the stack (due to | |
20 | * slow/fast restore user regs). | |
21 | */ | |
22 | #if S_R0 != 0 | |
23 | #error "Please fix" | |
24 | #endif | |
25 | ||
	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack: if no work flags are set, user registers are restored
 * directly and r0 is never written back to the saved frame.
 *
 * NOTE(review): disable_irq / fast_restore_user_regs and the 'tsk'
 * register alias are assumed to come from entry-header.S -- not
 * visible in this file.
 */
ret_fast_syscall:
	disable_irq r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check work flags with IRQs off
	tst	r1, #_TIF_WORK_MASK		@ resched/signal/notify pending?
	bne	fast_work_pending		@ yes: take the slow path
	fast_restore_user_regs			@ no: restore and return to user
38 | ||
/*
 * Ok, we need to do extra processing, enter the slow path.
 * fast_work_pending first writes the syscall return value (r0) back
 * into the saved register frame, because the slow-path restore reloads
 * every register from the stack.  On entry to work_pending, r1 holds
 * the thread's TI_FLAGS word.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0 (writeback adjusts sp to the frame)
work_pending:
	tst	r1, #_TIF_NEED_RESCHED		@ reschedule first if requested
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending			@ nothing else to do
	mov	r0, sp				@ 'regs' (r1 still holds thread flags)
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume		@ deliver signals / resume notifications
	disable_irq r1				@ disable interrupts
	b	no_work_pending

work_resched:
	bl	schedule			@ on return, falls through to ret_to_user
						@ below, which re-checks the work flags
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall
 * ('why' is a register alias, assumed defined in entry-header.S -- not
 * visible here).  Loops back through work_pending until no work flags
 * remain set, then restores the full user register set.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ current thread's work flags
	tst	r1, #_TIF_WORK_MASK		@ anything pending?
	bne	work_pending			@ yes: handle it (may return here)
no_work_pending:
	slow_restore_user_regs			@ full restore, return to user mode
68 | ||
/*
 * This is how we return from a fork: the new child resumes here after
 * its first context switch.  schedule_tail finishes the scheduler's
 * post-switch bookkeeping (its argument is presumably set up by the
 * context-switch code -- not visible here; confirm against __switch_to).
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk			@ reload thread_info pointer for the child
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1				@ mark this as a real syscall return
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall		@ no: normal slow return path
	mov	r1, sp				@ regs pointer for the tracer
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace			@ report syscall exit to the tracer
	b	ret_slow_syscall
83 | ||
84 | ||
85 | #include "calls.S" | |
86 | ||
87 | /*============================================================================= | |
88 | * SWI handler | |
89 | *----------------------------------------------------------------------------- | |
90 | */ | |
91 | ||
/* If we're optimising for StrongARM the resulting code won't
   run on an ARM7 and we can save a couple of instructions.
								--pb

   arm710_bug_check: on ARM710 cores, verify that the instruction we
   trapped on really is a SWI; if not, back up and replay it from user
   mode (workaround semantics inferred from the code below -- confirm
   against the ARM710 errata documentation). */
#ifdef CONFIG_CPU_ARM710
	.macro	arm710_bug_check, instr, temp
	and	\temp, \instr, #0x0f000000	@ check for SWI
	teq	\temp, #0x0f000000		@ opcode field really a SWI?
	bne	.Larm700bug			@ no: take the replay path
	.endm

.Larm700bug:
	ldr	r0, [sp, #S_PSR]		@ Get calling cpsr
	sub	lr, lr, #4			@ back up to the faulting instruction
	str	lr, [r8]			@ NOTE(review): r8 assumed to point at the
						@ saved-PC slot here -- confirm in entry-header.S
	msr	spsr_cxsf, r0			@ stage caller's PSR for the exception return
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr (user-mode registers)
	mov	r0, r0				@ nop: no banked-register access directly after ldm^
	ldr	lr, [sp, #S_PC]			@ Get PC
	add	sp, sp, #S_FRAME_SIZE		@ discard the saved frame
	movs	pc, lr				@ return to user; SPSR -> CPSR
#else
	.macro	arm710_bug_check, instr, temp
	.endm
#endif
116 | ||
	.align	5
/*
 * SWI (syscall) entry point.  save_user_regs / zero_fp / get_scno /
 * enable_irq / get_thread_info are macros, and scno/tbl/tsk/why are
 * register aliases, assumed to come from entry-header.S -- not visible
 * in this file.
 */
ENTRY(vector_swi)
	save_user_regs				@ build the pt_regs frame on the SVC stack
	zero_fp
	get_scno				@ extract the syscall number from the SWI
	arm710_bug_check scno, ip		@ ARM710 erratum check (no-op elsewhere)

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment		@ address of cr_alignment (literal below)
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq ip

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	adr	lr, ret_fast_syscall		@ return address
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	@ Not an in-range Linux syscall: try the private ARM syscall range.
	add	r1, sp, #S_OFF			@ r1 = regs
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #ARMSWI_OFFSET
	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
	bcs	arm_syscall			@ in the private range: handle it
	b	sys_ni_syscall			@ not private func
151 | ||
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 * The tracer is notified on syscall entry (IP = 0) and exit (IP = 1);
 * r0-r3 are reloaded from the saved frame after entry tracing since
 * the tracer may have modified them.
 */
__sys_trace:
	add	r1, sp, #S_OFF			@ regs pointer
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b				@ out of range: private/ni path in vector_swi

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0 into the frame
	mov	r1, sp				@ regs pointer for the tracer
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
174 | ||
	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
/*
 * Literal pool entry holding the address of cr_alignment, so that
 * vector_swi can reload the CP15 control register with a pc-relative ldr.
 */
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
181 | ||
/*
 * The system call table: calls.S expands to one table entry per syscall,
 * indexed by syscall number (see the [tbl, scno, lsl #2] dispatches above).
 */
	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
185 | ||
/*============================================================================
 * Special system call wrappers
 *
 * sys_syscall: the indirect-syscall entry (syscall(2)).  The real syscall
 * number arrives in r0 and its arguments in r1-r4 (plus r5/r6), so on a
 * valid number everything is shifted down one register before dispatch.
 * Note the condition codes set by cmp/cmpne are carried through all the
 * lo-conditional instructions below -- instruction order matters.
 */
@ r0 = syscall number
@ r5 = syscall table
	.type	sys_syscall, #function
sys_syscall:
	eor	scno, r0, #OS_NUMBER << 20	@ strip/check the OS number in r0
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args (5th/6th move to the stack slots)
	movlo	r0, r1				@ shift the argument registers down by one
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]		@ tail-call the real sys_* routine
	b	sys_ni_syscall			@ out of range: -ENOSYS
203 | ||
/*
 * Trampolines for syscalls that need the saved register frame (or an
 * extra stack-passed argument) in addition to their normal register
 * arguments.  S_OFF is the offset from sp to the saved pt_regs frame
 * (assumed defined in entry-header.S -- not visible here).
 */
sys_fork_wrapper:
	add	r0, sp, #S_OFF			@ arg 1 = regs
	b	sys_fork

sys_vfork_wrapper:
	add	r0, sp, #S_OFF			@ arg 1 = regs
	b	sys_vfork

sys_execve_wrapper:
	add	r3, sp, #S_OFF			@ arg 4 = regs
	b	sys_execve

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]			@ pass regs in the stack-passed arg slot
	b	sys_clone

sys_sigsuspend_wrapper:
	add	r3, sp, #S_OFF			@ arg 4 = regs
	b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
	add	r2, sp, #S_OFF			@ arg 3 = regs
	b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ arg 1 = regs
	b	sys_sigreturn

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ arg 1 = regs
	b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]		@ arg 3 = user sp from the saved frame
	b	do_sigaltstack

sys_futex_wrapper:
	str	r5, [sp, #4]			@ push sixth arg
	b	sys_futex
244 | ||
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.  When PAGE_SHIFT > 12 the 4K-unit offset is
 * only representable in pages if its low PGOFF_MASK bits are clear.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK			@ offset an exact multiple of the page size?
	moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ yes: convert 4K units -> pages
	streq	r5, [sp, #4]			@ pass as the stack-based sixth arg
	beq	do_mmap2
	mov	r0, #-EINVAL			@ no: unrepresentable offset
	RETINSTR(mov,pc, lr)			@ return -EINVAL to the caller
#else
	str	r5, [sp, #4]			@ 4K units == pages here; pass through
	b	do_mmap2
#endif