Pull esi-support into release branch
[deliverable/linux.git] / arch / arm / kernel / entry-common.S
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/entry-common.S
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
1da177e4 10
1da177e4
LT
11#include <asm/unistd.h>
12
13#include "entry-header.S"
14
1da177e4
LT
15
16 .align 5
17/*
18 * This is the fast syscall return path. We do as little as
19 * possible here, and this includes saving r0 back into the SVC
20 * stack.
21 */
22ret_fast_syscall:
@ Entered with the user pt_regs frame on the SVC stack and r0 holding
@ the syscall return value. "tsk" is a register alias for the current
@ thread_info pointer (defined in entry-header.S -- confirm there).
1ec42c0c 23 disable_irq @ disable interrupts
1da177e4
LT
24 ldr r1, [tsk, #TI_FLAGS]
25 tst r1, #_TIF_WORK_MASK
26 bne fast_work_pending @ pending work: fall back to slow path
f4dc9a4c
RK
27
28 @ fast_restore_user_regs
@ r0 is NOT reloaded here (it already holds the return value); the
@ ldmdb list starts at r1.
29 ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
30 ldr lr, [sp, #S_OFF + S_PC]! @ get pc
31 msr spsr_cxsf, r1 @ save in spsr_svc
32 ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
33 mov r0, r0 @ nop: spacing after ldm {..}^ (user-bank
@ transfer) -- do not remove
34 add sp, sp, #S_FRAME_SIZE - S_PC
35 movs pc, lr @ return & move spsr_svc into cpsr
1da177e4
LT
36
37/*
38 * Ok, we need to do extra processing, enter the slow path.
39 */
40fast_work_pending:
@ Save the syscall return value into the frame, then share the
@ work-handling code with the slow return path below.
41 str r0, [sp, #S_R0+S_OFF]! @ returned r0
@ work_pending is also entered from ret_slow_syscall with r1 = TI_FLAGS.
42work_pending:
43 tst r1, #_TIF_NEED_RESCHED
44 bne work_resched
45 tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
46 beq no_work_pending
47 mov r0, sp @ 'regs'
@ r1 still holds the thread flags; "why" says whether this was a real
@ syscall (register alias -- presumably set up in entry-header.S).
48 mov r2, why @ 'syscall'
49 bl do_notify_resume
a6c61e9d 50 b ret_slow_syscall @ Check work again
1da177e4
LT
51
52work_resched:
53 bl schedule
@ Fall through into the slow return path to re-check the work flags.
54/*
55 * "slow" syscall return path. "why" tells us if this was a real syscall.
56 */
57ENTRY(ret_to_user)
58ret_slow_syscall:
1ec42c0c 59 disable_irq @ disable interrupts
1da177e4
LT
60 ldr r1, [tsk, #TI_FLAGS]
61 tst r1, #_TIF_WORK_MASK
62 bne work_pending
63no_work_pending:
f4dc9a4c
RK
64 @ slow_restore_user_regs
@ Unlike the fast path, r0 is restored from the frame here too.
65 ldr r1, [sp, #S_PSR] @ get calling cpsr
66 ldr lr, [sp, #S_PC]! @ get pc
67 msr spsr_cxsf, r1 @ save in spsr_svc
68 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
69 mov r0, r0 @ nop: spacing after ldm {..}^ -- do not remove
70 add sp, sp, #S_FRAME_SIZE - S_PC
71 movs pc, lr @ return & move spsr_svc into cpsr
1da177e4
LT
72
73/*
74 * This is how we return from a fork.
75 */
76ENTRY(ret_from_fork)
77 bl schedule_tail
78 get_thread_info tsk
79 ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
80 mov why, #1 @ child returns as if from a real syscall
81 tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
82 beq ret_slow_syscall
@ Tracing: report the syscall exit to the tracer before returning.
83 mov r1, sp
84 mov r0, #1 @ trace exit [IP = 1]
85 bl syscall_trace
86 b ret_slow_syscall
87
88
87
88
fa1b4f91
AV
@ Count the syscalls: each CALL() entry in calls.S increments
@ NR_syscalls by one.  CALL() is then redefined so that the second
@ inclusion of calls.S (below) emits the actual table of pointers.
89 .equ NR_syscalls,0
90#define CALL(x) .equ NR_syscalls,NR_syscalls+1
1da177e4 91#include "calls.S"
fa1b4f91
AV
92#undef CALL
93#define CALL(x) .long x
1da177e4
LT
94
95/*=============================================================================
96 * SWI handler
97 *-----------------------------------------------------------------------------
98 */
99
100 /* If we're optimising for StrongARM the resulting code won't
101 run on an ARM7 and we can save a couple of instructions.
102 --pb */
103#ifdef CONFIG_CPU_ARM710
3f2829a3
NP
@ A710() marks instructions that are only assembled for the ARM710
@ SWI-bug workaround; they expand to nothing on other CPUs.
104#define A710(code...) code
@ ARM710 SWI-bug recovery: restore the complete user register set,
@ drop the frame, and retry the faulting instruction (lr - 4).
105.Larm710bug:
1da177e4
LT
106 ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
107 mov r0, r0 @ nop: spacing after ldm {..}^ -- do not remove
1da177e4 108 add sp, sp, #S_FRAME_SIZE
60ac133a 109 subs pc, lr, #4
1da177e4 110#else
3f2829a3 111#define A710(code...)
1da177e4
LT
112#endif
113
113
114 .align 5
@ SWI (syscall) entry from user mode.  Builds the pt_regs frame on the
@ SVC stack, then determines the syscall number according to which ABI
@ flavours the kernel was configured for.  "scno" (r7 per the EABI
@ comment below) and r10 carry the number into the dispatch code.
115ENTRY(vector_swi)
f4dc9a4c
RK
116 sub sp, sp, #S_FRAME_SIZE
117 stmia sp, {r0 - r12} @ Calling r0 - r12
118 add r8, sp, #S_PC
119 stmdb r8, {sp, lr}^ @ Calling sp, lr
120 mrs r8, spsr @ called from non-FIQ mode, so ok.
121 str lr, [sp, #S_PC] @ Save calling PC
122 str r8, [sp, #S_PSR] @ Save CPSR
123 str r0, [sp, #S_OLD_R0] @ Save OLD_R0
1da177e4 124 zero_fp
e0f9f4a6
RK
125
126 /*
127 * Get the system call number.
128 */
3f2829a3 129
dd35afc2 130#if defined(CONFIG_OABI_COMPAT)
3f2829a3 131
dd35afc2
NP
132 /*
133 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
134 * value to determine if it is an EABI or an old ABI call.
135 */
136#ifdef CONFIG_ARM_THUMB
@ r8 still holds the saved SPSR; PSR_T_BIT set means the caller was in
@ Thumb state, where the 32-bit SWI immediate cannot be fetched.
137 tst r8, #PSR_T_BIT
138 movne r10, #0 @ no thumb OABI emulation
139 ldreq r10, [lr, #-4] @ get SWI instruction
140#else
141 ldr r10, [lr, #-4] @ get SWI instruction
142 A710( and ip, r10, #0x0f000000 @ check for SWI )
143 A710( teq ip, #0x0f000000 )
144 A710( bne .Larm710bug )
145#endif
146
147#elif defined(CONFIG_AEABI)
148
149 /*
150 * Pure EABI user space always put syscall number into scno (r7).
151 */
3f2829a3
NP
152 A710( ldr ip, [lr, #-4] @ get SWI instruction )
153 A710( and ip, ip, #0x0f000000 @ check for SWI )
154 A710( teq ip, #0x0f000000 )
155 A710( bne .Larm710bug )
dd35afc2 156
3f2829a3 157#elif defined(CONFIG_ARM_THUMB)
dd35afc2
NP
158
159 /* Legacy ABI only, possibly thumb mode. */
e0f9f4a6
RK
160 tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
161 addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
162 ldreq scno, [lr, #-4]
dd35afc2 163
e0f9f4a6 164#else
dd35afc2
NP
165
166 /* Legacy ABI only. */
@ ARM state: the syscall number is encoded in the SWI instruction
@ itself, one word behind the saved return address.
167 ldr scno, [lr, #-4] @ get SWI instruction
3f2829a3
NP
168 A710( and ip, scno, #0x0f000000 @ check for SWI )
169 A710( teq ip, #0x0f000000 )
170 A710( bne .Larm710bug )
dd35afc2 171
e0f9f4a6 172#endif
1da177e4
LT
173
174#ifdef CONFIG_ALIGNMENT_TRAP
@ Restore the expected alignment-trap setting in the control register
@ (user space may run with a different setting -- see __cr_alignment).
175 ldr ip, __cr_alignment
176 ldr ip, [ip]
177 mcr p15, 0, ip, c1, c0 @ update control register
178#endif
@ Frame is complete and scno is being resolved: safe to take interrupts.
1ec42c0c 179 enable_irq
1da177e4 180
1da177e4 181 get_thread_info tsk
dd35afc2 182 adr tbl, sys_call_table @ load syscall table pointer
1da177e4 183 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
dd35afc2
NP
184
185#if defined(CONFIG_OABI_COMPAT)
186 /*
187 * If the swi argument is zero, this is an EABI call and we do nothing.
188 *
189 * If this is an old ABI call, get the syscall number into scno and
190 * get the old ABI syscall table address.
191 */
192 bics r10, r10, #0xff000000
193 eorne scno, r10, #__NR_OABI_SYSCALL_BASE
194 ldrne tbl, =sys_oabi_call_table
195#elif !defined(CONFIG_AEABI)
@ Legacy ABI: strip the SWI opcode bits and fold out the OS base so
@ scno becomes a zero-based table index.
1da177e4 196 bic scno, scno, #0xff000000 @ mask off SWI op-code
e0f9f4a6 197 eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
3f2829a3 198#endif
dd35afc2 199
dd35afc2 199
@ Args 5 and 6 are passed on the stack per the syscall convention.
3f2829a3 200 stmdb sp!, {r4, r5} @ push fifth and sixth args
1da177e4
LT
201 tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
202 bne __sys_trace
203
@ Untraced fast dispatch: in-range numbers jump straight into the
@ table with the return address preset to ret_fast_syscall.
1da177e4 204 cmp scno, #NR_syscalls @ check upper syscall limit
3f2829a3 205 adr lr, ret_fast_syscall @ return address
1da177e4
LT
206 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
207
@ Out-of-range numbers fall through (or arrive via "2:" below): ARM
@ private calls (__ARM_NR_*) go to arm_syscall, anything else ENOSYS.
208 add r1, sp, #S_OFF
2092: mov why, #0 @ no longer a real syscall
e0f9f4a6
RK
210 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
211 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
1da177e4
LT
212 bcs arm_syscall
213 b sys_ni_syscall @ not private func
214
215 /*
216 * This is the really slow path. We're going to be doing
217 * context switches, and waiting for our parent to respond.
218 */
219__sys_trace:
3f471126 220 mov r2, scno
1da177e4
LT
221 add r1, sp, #S_OFF
222 mov r0, #0 @ trace entry [IP = 0]
223 bl syscall_trace
224
@ The tracer may rewrite the syscall number and the saved r0-r3, so
@ both are reloaded before dispatching.
225 adr lr, __sys_trace_return @ return address
3f471126 226 mov scno, r0 @ syscall number (possibly new)
1da177e4
LT
227 add r1, sp, #S_R0 + S_OFF @ pointer to regs
228 cmp scno, #NR_syscalls @ check upper syscall limit
229 ldmccia r1, {r0 - r3} @ have to reload r0 - r3
230 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
231 b 2b
232
232
@ Return path for traced syscalls: store the return value, report the
@ syscall exit to the tracer, then take the normal slow return.
233__sys_trace_return:
234 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
3f471126 235 mov r2, scno
1da177e4
LT
236 mov r1, sp
237 mov r0, #1 @ trace exit [IP = 1]
238 bl syscall_trace
239 b ret_slow_syscall
240
240
241 .align 5
242#ifdef CONFIG_ALIGNMENT_TRAP
@ Literal holding the address of the kernel's cached control-register
@ value (cr_alignment), loaded on SWI entry above.
243 .type __cr_alignment, #object
244__cr_alignment:
245 .word cr_alignment
dd35afc2
NP
246#endif
@ Flush pending literal-pool entries (e.g. the ldr =sys_oabi_call_table).
247 .ltorg
248
248
249/*
250 * This is the syscall table declaration for native ABI syscalls.
251 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
252 */
@ ABI()/OBSOLETE() select which entry calls.S emits for dual-ABI and
@ deprecated syscalls; here we pick the native/EABI variants.
253#define ABI(native, compat) native
254#ifdef CONFIG_AEABI
255#define OBSOLETE(syscall) sys_ni_syscall
256#else
257#define OBSOLETE(syscall) syscall
1da177e4
LT
258#endif
259
260 .type sys_call_table, #object
261ENTRY(sys_call_table)
262#include "calls.S"
dd35afc2
NP
263#undef ABI
264#undef OBSOLETE
1da177e4
LT
265
1da177e4
LT
265
266/*============================================================================
267 * Special system call wrappers
268 */
269@ r0 = syscall number
567bd980 270@ r8 = syscall table
@ Indirect syscall: syscall(nr, a, b, c, d, e) -- shift the arguments
@ down one register and dispatch nr through the current table (tbl).
@ Recursing into sys_syscall itself is rejected (first cmp).
1da177e4
LT
271 .type sys_syscall, #function
272sys_syscall:
5247593c 273 bic scno, r0, #__NR_OABI_SYSCALL_BASE
1da177e4
LT
274 cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
275 cmpne scno, #NR_syscalls @ check range
276 stmloia sp, {r5, r6} @ shuffle args
277 movlo r0, r1
278 movlo r1, r2
279 movlo r2, r3
280 movlo r3, r4
281 ldrlo pc, [tbl, scno, lsl #2]
282 b sys_ni_syscall
283
283
@ The wrappers below pass a pointer to the saved user pt_regs frame
@ (sp + S_OFF, above the r4/r5 pushed at dispatch) as an extra
@ argument to syscalls that need access to the full register state.
284sys_fork_wrapper:
285 add r0, sp, #S_OFF
286 b sys_fork
287
288sys_vfork_wrapper:
289 add r0, sp, #S_OFF
290 b sys_vfork
291
292sys_execve_wrapper:
293 add r3, sp, #S_OFF
294 b sys_execve
295
296sys_clone_wrapper:
@ clone takes regs via the stack slot for its fifth argument.
297 add ip, sp, #S_OFF
298 str ip, [sp, #4]
299 b sys_clone
300
301sys_sigsuspend_wrapper:
302 add r3, sp, #S_OFF
303 b sys_sigsuspend
304
305sys_rt_sigsuspend_wrapper:
306 add r2, sp, #S_OFF
307 b sys_rt_sigsuspend
308
309sys_sigreturn_wrapper:
310 add r0, sp, #S_OFF
311 b sys_sigreturn
312
313sys_rt_sigreturn_wrapper:
314 add r0, sp, #S_OFF
315 b sys_rt_sigreturn
316
317sys_sigaltstack_wrapper:
@ Third argument is the user stack pointer from the saved frame.
318 ldr r2, [sp, #S_OFF + S_SP]
319 b do_sigaltstack
320
320
713c4815
NP
@ Fix up the user-supplied buffer size: a size of 88 is rewritten to
@ the 84 the kernel expects -- presumably compensating for an ABI
@ padding difference in struct statfs64 (confirm against the C side).
713c4815
NP
321sys_statfs64_wrapper:
322 teq r1, #88
323 moveq r1, #84
324 b sys_statfs64
325
326sys_fstatfs64_wrapper:
327 teq r1, #88
328 moveq r1, #84
329 b sys_fstatfs64
330
330
1da177e4
LT
331/*
332 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
333 * offset, we return EINVAL.
334 */
335sys_mmap2:
336#if PAGE_SHIFT > 12
@ Larger page sizes: the 4K-unit offset must be a multiple of the page
@ size; convert it to page units or fail with -EINVAL.
337 tst r5, #PGOFF_MASK
338 moveq r5, r5, lsr #PAGE_SHIFT - 12
339 streq r5, [sp, #4]
340 beq do_mmap2
341 mov r0, #-EINVAL
7999d8d7 342 mov pc, lr
1da177e4
LT
343#else
@ 4K pages: offset is already in page units; pass it as the sixth
@ (stacked) argument.
344 str r5, [sp, #4]
345 b do_mmap2
346#endif
687ad019
NP
347
348#ifdef CONFIG_OABI_COMPAT
dd35afc2 349
687ad019
NP
350/*
351 * These are syscalls with argument register differences
352 */
353
@ OABI passes 64-bit arguments in different register pairs than EABI
@ (EABI requires even/odd alignment); these stubs reshuffle the
@ registers/stack slots to match the EABI entry points.
354sys_oabi_pread64:
355 stmia sp, {r3, r4}
356 b sys_pread64
357
358sys_oabi_pwrite64:
359 stmia sp, {r3, r4}
360 b sys_pwrite64
361
362sys_oabi_truncate64:
363 mov r3, r2
364 mov r2, r1
365 b sys_truncate64
366
367sys_oabi_ftruncate64:
368 mov r3, r2
369 mov r2, r1
370 b sys_ftruncate64
371
372sys_oabi_readahead:
373 str r3, [sp]
374 mov r3, r2
375 mov r2, r1
376 b sys_readahead
377
377
dd35afc2
NP
dd35afc2
NP
378/*
379 * Let's declare a second syscall table for old ABI binaries
380 * using the compatibility syscall entries.
381 */
@ Same calls.S include as the native table above, but ABI() now picks
@ the compat entries and OBSOLETE() keeps the legacy syscalls alive.
382#define ABI(native, compat) compat
383#define OBSOLETE(syscall) syscall
384
385 .type sys_oabi_call_table, #object
386ENTRY(sys_oabi_call_table)
387#include "calls.S"
388#undef ABI
389#undef OBSOLETE
390
687ad019
NP
391#endif
392
392
This page took 0.194756 seconds and 5 git commands to generate.