[ARM] 3108/2: old ABI compat: statfs64 and fstatfs64
[deliverable/linux.git] / arch / arm / kernel / entry-common.S
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/entry-common.S
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/config.h>
11
1da177e4
LT
12#include <asm/unistd.h>
13
14#include "entry-header.S"
15
1da177e4
LT
16
17 .align 5
18/*
19 * This is the fast syscall return path. We do as little as
20 * possible here, and this includes saving r0 back into the SVC
21 * stack.
22 */
23ret_fast_syscall:
@ Fast syscall return path: interrupts are disabled, TI_FLAGS is checked
@ once, and only the minimal user register set is restored (r0 already
@ holds the syscall return value and is not reloaded).
1ec42c0c 24 disable_irq @ disable interrupts
1da177e4
LT
25 ldr r1, [tsk, #TI_FLAGS]
26 tst r1, #_TIF_WORK_MASK @ resched/signal/notify work pending?
27 bne fast_work_pending @ yes: fall into the slow path
f4dc9a4c
RK
28
29 @ fast_restore_user_regs
30 ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
31 ldr lr, [sp, #S_OFF + S_PC]! @ get pc
32 msr spsr_cxsf, r1 @ save in spsr_svc
33 ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
34 mov r0, r0 @ NOTE(review): nop after ldm^ — presumably guards the banked-register access hazard; confirm against ARM ARM
35 add sp, sp, #S_FRAME_SIZE - S_PC
36 movs pc, lr @ return & move spsr_svc into cpsr
1da177e4
LT
37
38/*
39 * Ok, we need to do extra processing, enter the slow path.
40 */
@ Slow-path work handling. fast_work_pending first stores the syscall
@ return value (r0) into the saved register frame, then falls through
@ to the common work_pending loop shared with ret_slow_syscall.
41fast_work_pending:
42 str r0, [sp, #S_R0+S_OFF]! @ returned r0
43work_pending:
@ r1 = TI_FLAGS, loaded by the caller with interrupts disabled.
44 tst r1, #_TIF_NEED_RESCHED
45 bne work_resched
46 tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
47 beq no_work_pending
48 mov r0, sp @ 'regs'
49 mov r2, why @ 'syscall'
50 bl do_notify_resume
a6c61e9d 51 b ret_slow_syscall @ Check work again
1da177e4
LT
52
53work_resched:
54 bl schedule @ reschedule, then re-check work via ret_slow_syscall fall-through
55/*
56 * "slow" syscall return path. "why" tells us if this was a real syscall.
57 */
58ENTRY(ret_to_user)
59ret_slow_syscall:
1ec42c0c 60 disable_irq @ disable interrupts
1da177e4
LT
61 ldr r1, [tsk, #TI_FLAGS]
62 tst r1, #_TIF_WORK_MASK @ any pending work before returning to user?
63 bne work_pending
64no_work_pending:
f4dc9a4c
RK
65 @ slow_restore_user_regs
66 ldr r1, [sp, #S_PSR] @ get calling cpsr
67 ldr lr, [sp, #S_PC]! @ get pc
68 msr spsr_cxsf, r1 @ save in spsr_svc
69 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
70 mov r0, r0 @ NOTE(review): nop after ldm^ — presumably guards the banked-register access hazard; confirm against ARM ARM
71 add sp, sp, #S_FRAME_SIZE - S_PC
72 movs pc, lr @ return & move spsr_svc into cpsr
1da177e4
LT
73
74/*
75 * This is how we return from a fork.
76 */
77ENTRY(ret_from_fork)
@ First code run in a newly forked task. Finish the context switch,
@ then report a syscall-exit trace event if the tracer asked for it,
@ and return to user space via the slow path (why=1: real syscall).
78 bl schedule_tail
79 get_thread_info tsk
80 ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
81 mov why, #1
82 tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
83 beq ret_slow_syscall
84 mov r1, sp
85 mov r0, #1 @ trace exit [IP = 1]
86 bl syscall_trace
87 b ret_slow_syscall
88
89
90#include "calls.S"
91
92/*=============================================================================
93 * SWI handler
94 *-----------------------------------------------------------------------------
95 */
96
97 /* If we're optimising for StrongARM the resulting code won't
98 run on an ARM7 and we can save a couple of instructions.
99 --pb */
@ ARM710 workaround. When CONFIG_CPU_ARM710 is set, A710(code) emits its
@ argument so vector_swi can re-read the SWI instruction and verify it;
@ if the check fails, .Larm710bug restores the full user register frame
@ and retries the faulting instruction (subs pc, lr, #4).
@ Otherwise A710() expands to nothing.
100#ifdef CONFIG_CPU_ARM710
3f2829a3
NP
101#define A710(code...) code
102.Larm710bug:
1da177e4
LT
103 ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
104 mov r0, r0 @ NOTE(review): nop after ldm^ — presumably guards the banked-register access hazard; confirm
1da177e4 105 add sp, sp, #S_FRAME_SIZE
60ac133a 106 subs pc, lr, #4 @ re-execute the instruction that trapped
1da177e4 107#else
3f2829a3 108#define A710(code...)
1da177e4
LT
109#endif
110
111 .align 5
112ENTRY(vector_swi)
@ SWI (syscall) entry. Build a full pt_regs frame on the SVC stack,
@ extract the syscall number (method depends on ABI config), then
@ dispatch through sys_call_table. Out-of-range numbers fall through
@ to the ARM-private syscall check below.
f4dc9a4c
RK
113 sub sp, sp, #S_FRAME_SIZE
114 stmia sp, {r0 - r12} @ Calling r0 - r12
115 add r8, sp, #S_PC
116 stmdb r8, {sp, lr}^ @ Calling sp, lr
117 mrs r8, spsr @ called from non-FIQ mode, so ok.
118 str lr, [sp, #S_PC] @ Save calling PC
119 str r8, [sp, #S_PSR] @ Save CPSR
120 str r0, [sp, #S_OLD_R0] @ Save OLD_R0
1da177e4 121 zero_fp
e0f9f4a6
RK
122
123 /*
124 * Get the system call number.
125 */
3f2829a3
NP
126#if defined(CONFIG_AEABI)
127
128 @ syscall number is in scno (r7) already.
129
130 A710( ldr ip, [lr, #-4] @ get SWI instruction )
131 A710( and ip, ip, #0x0f000000 @ check for SWI )
132 A710( teq ip, #0x0f000000 )
133 A710( bne .Larm710bug )
134#elif defined(CONFIG_ARM_THUMB)
e0f9f4a6
RK
@ Thumb mode cannot encode the number in the SWI instruction, so it
@ arrives in r7; ARM mode still reads it out of the trapping instruction.
135 tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
136 addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
137 ldreq scno, [lr, #-4]
138#else
139 ldr scno, [lr, #-4] @ get SWI instruction
3f2829a3
NP
140 A710( and ip, scno, #0x0f000000 @ check for SWI )
141 A710( teq ip, #0x0f000000 )
142 A710( bne .Larm710bug )
e0f9f4a6 143#endif
1da177e4
LT
144
145#ifdef CONFIG_ALIGNMENT_TRAP
146 ldr ip, __cr_alignment
147 ldr ip, [ip]
148 mcr p15, 0, ip, c1, c0 @ update control register
149#endif
1ec42c0c 150 enable_irq
1da177e4 151
1da177e4
LT
152 get_thread_info tsk
153 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
3f2829a3 154#ifndef CONFIG_AEABI
1da177e4 155 bic scno, scno, #0xff000000 @ mask off SWI op-code
e0f9f4a6 156 eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
3f2829a3 157#endif
1da177e4 158 adr tbl, sys_call_table @ load syscall table pointer
3f2829a3 159 stmdb sp!, {r4, r5} @ push fifth and sixth args
1da177e4
LT
160 tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
161 bne __sys_trace
162
1da177e4 163 cmp scno, #NR_syscalls @ check upper syscall limit
3f2829a3 164 adr lr, ret_fast_syscall @ return address
1da177e4
LT
165 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
166
@ Number out of table range: either an ARM-private syscall
@ (__ARM_NR_BASE and up) or truly unimplemented.
167 add r1, sp, #S_OFF
1682: mov why, #0 @ no longer a real syscall
e0f9f4a6
RK
169 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
170 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
1da177e4
LT
171 bcs arm_syscall
172 b sys_ni_syscall @ not private func
173
174 /*
175 * This is the really slow path. We're going to be doing
176 * context switches, and waiting for our parent to respond.
177 */
@ Traced-syscall path: notify the tracer on entry, re-dispatch the
@ (possibly tracer-modified) syscall, then notify again on exit before
@ taking the slow return path.
178__sys_trace:
179 add r1, sp, #S_OFF
180 mov r0, #0 @ trace entry [IP = 0]
181 bl syscall_trace
182
183 adr lr, __sys_trace_return @ return address
184 add r1, sp, #S_R0 + S_OFF @ pointer to regs
185 cmp scno, #NR_syscalls @ check upper syscall limit
186 ldmccia r1, {r0 - r3} @ have to reload r0 - r3
187 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
188 b 2b @ out of range: shared not-implemented/private handling above
189
190__sys_trace_return:
191 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
192 mov r1, sp
193 mov r0, #1 @ trace exit [IP = 1]
194 bl syscall_trace
195 b ret_slow_syscall
196
197 .align 5
198#ifdef CONFIG_ALIGNMENT_TRAP
@ Literal pool entry: address of cr_alignment, loaded by vector_swi to
@ refresh the CP15 control register on syscall entry.
199 .type __cr_alignment, #object
200__cr_alignment:
201 .word cr_alignment
202#endif
203
@ The syscall dispatch table itself; calls.S expands to one word per entry.
204 .type sys_call_table, #object
205ENTRY(sys_call_table)
206#include "calls.S"
207
208/*============================================================================
209 * Special system call wrappers
210 */
@ sys_syscall: the indirect-syscall entry point — syscall(2) with the
@ real syscall number in r0 and its arguments shifted down one slot.
211@ r0 = syscall number
567bd980 212@ r8 = syscall table
1da177e4
LT
213 .type sys_syscall, #function
214sys_syscall:
3f2829a3 215#ifndef CONFIG_AEABI
e0f9f4a6 216 eor scno, r0, #__NR_SYSCALL_BASE
1da177e4
LT
@ Reject a recursive sys_syscall and anything past the table end;
@ otherwise shift args r1..r6 into the r0..r5 positions and dispatch.
217 cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
218 cmpne scno, #NR_syscalls @ check range
219 stmloia sp, {r5, r6} @ shuffle args
220 movlo r0, r1
221 movlo r1, r2
222 movlo r2, r3
223 movlo r3, r4
224 ldrlo pc, [tbl, scno, lsl #2]
3f2829a3 225#endif
1da177e4
LT
226 b sys_ni_syscall
227
@ Wrappers for syscalls whose C implementation needs a pointer to the
@ saved user register frame (pt_regs); each loads the extra argument
@ and tail-branches to the real handler.
228sys_fork_wrapper:
229 add r0, sp, #S_OFF @ r0 = pt_regs
230 b sys_fork
231
232sys_vfork_wrapper:
233 add r0, sp, #S_OFF @ r0 = pt_regs
234 b sys_vfork
235
236sys_execve_wrapper:
237 add r3, sp, #S_OFF @ r3 = pt_regs
238 b sys_execve
239
240sys_clone_wrapper:
241 add ip, sp, #S_OFF
242 str ip, [sp, #4] @ pass pt_regs as the stacked 5th argument
243 b sys_clone
244
245sys_sigsuspend_wrapper:
246 add r3, sp, #S_OFF @ r3 = pt_regs
247 b sys_sigsuspend
248
249sys_rt_sigsuspend_wrapper:
250 add r2, sp, #S_OFF @ r2 = pt_regs
251 b sys_rt_sigsuspend
252
253sys_sigreturn_wrapper:
254 add r0, sp, #S_OFF @ r0 = pt_regs
255 b sys_sigreturn
256
257sys_rt_sigreturn_wrapper:
258 add r0, sp, #S_OFF @ r0 = pt_regs
259 b sys_rt_sigreturn
260
261sys_sigaltstack_wrapper:
262 ldr r2, [sp, #S_OFF + S_SP] @ r2 = user sp at syscall time
263 b do_sigaltstack
264
713c4815
NP
@ Old-ABI compatibility for [f]statfs64 (see patch title): if user space
@ passes a size of 88, rewrite it to 84 before calling the handler.
@ NOTE(review): presumably 88 is the old-ABI padded sizeof(struct statfs64)
@ vs 84 under the kernel's layout — confirm against asm/statfs.h.
265sys_statfs64_wrapper:
266 teq r1, #88
267 moveq r1, #84
268 b sys_statfs64
269
270sys_fstatfs64_wrapper:
271 teq r1, #88
272 moveq r1, #84
273 b sys_fstatfs64
274
1da177e4
LT
275/*
276 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
277 * offset, we return EINVAL.
278 */
279sys_mmap2:
280#if PAGE_SHIFT > 12
@ Page size > 4K: the 4K-unit offset must be a multiple of the real
@ page size; convert it to page units, else fail with EINVAL.
281 tst r5, #PGOFF_MASK
282 moveq r5, r5, lsr #PAGE_SHIFT - 12
283 streq r5, [sp, #4] @ pass page offset as stacked 6th argument
284 beq do_mmap2
285 mov r0, #-EINVAL
286 RETINSTR(mov,pc, lr)
287#else
@ 4K pages: the offset is already in page units; pass it through.
288 str r5, [sp, #4]
289 b do_mmap2
290#endif
This page took 0.090105 seconds and 5 git commands to generate.