/*
 * linux/arch/arm/kernel/entry-common.S
 *
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	/*
	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
	 */
	ldr	r2, [sp, #S_PSR]
	mov	r0, sp				@ 'regs'
	tst	r2, #15				@ are we returning to user mode?
	bne	no_work_pending			@ no?  just leave, then...
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	mov	why, #1
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
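/*
 * calls.S is included several times with different CALL() definitions.
 * The first pass above expands every CALL() line to an .equ that bumps
 * NR_syscalls by one, i.e. it only counts the entries; the redefinition
 * just above makes each CALL(sym) emit ".long sym", which is what fills
 * in sys_call_table (and sys_oabi_call_table) further down.  As an
 * illustration, a calls.S with three CALL() lines leaves NR_syscalls = 3
 * here and emits three .long pointers at each later include.
 */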

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

	.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
	.endm
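/*
 * On entry to mcount/__gnu_mcount_nc, lr points just past the call
 * instruction that gcc inserted at the start of the instrumented function,
 * so stepping back by MCOUNT_INSN_SIZE (and clearing a possible Thumb bit)
 * recovers the address that ftrace records for that function.
 */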

	.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
	.endm
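/*
 * __mcount is the non-dynamic tracer entry: if ftrace_trace_function still
 * points at ftrace_stub (and, with the graph tracer configured, both graph
 * hooks still point at their stub defaults), there is nothing to do and we
 * fall through to mcount_exit.  Otherwise the registered tracer is called
 * at 1: with r0 = instrumented function and r1 = its caller's lr, and
 * returns to 2: to unwind the mcount frame.
 */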

	.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
	.endm
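/*
 * __ftrace_caller is the dynamic-ftrace variant: ftrace_call\suffix and
 * ftrace_graph_call\suffix are well-known patch sites.  At runtime the
 * ftrace core rewrites the "bl ftrace_stub" (and, for the graph tracer,
 * the "mov r0, r0" placeholder) into branches to the active handlers, so
 * no runtime pointer checks are needed here, unlike in __mcount.
 */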

	.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
	.endm
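/*
 * prepare_ftrace_return() (arch/arm/kernel/ftrace.c) takes the address of
 * the saved parent lr in r0, the instrumented function's address in r1 and
 * the frame pointer in r2; it swaps the saved return address for
 * return_to_handler so the function's exit can be traced as well.
 */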

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

	.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
	.endm

	.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
	.endm
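/*
 * With the old APCS prologue described above (mov ip, sp; push {fp, ip,
 * lr, pc}; sub fp, ip, #4), fp ends up pointing at the saved pc slot, so
 * the caller's lr sits at [fp, #-4]; that is why mcount_get_lr and
 * mcount_exit can recover it from there without having saved it here.
 */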

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

	.purgem mcount_enter
	.purgem mcount_get_lr
	.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

	.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
	.endm

	.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
	.endm
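/*
 * Stack layout inside __gnu_mcount_nc: the call site has already done
 * "push {lr}", and mcount_enter pushes r0-r3 and lr (the return address
 * into the instrumented function) on top of that.  The instrumented
 * function's original lr therefore lives at [sp, #20], which is what
 * mcount_get_lr reads; mcount_exit pops the saved registers, the return
 * address into ip and the original lr back into lr, then returns via ip.
 */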

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

	.purgem mcount_enter
	.purgem mcount_get_lr
	.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif
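/*
 * For reference (an assumed, typical user-space sequence, not part of this
 * file): an EABI caller puts the number in r7 and issues a plain swi, e.g.
 *
 *	mov	r7, #__NR_getpid
 *	swi	#0
 *
 * An old-ABI caller instead encodes the number in the swi immediate itself
 * (__NR_OABI_SYSCALL_BASE plus the call number), which is why the
 * OABI_COMPAT code loads the instruction above and decodes its immediate
 * further down.
 */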

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, scno
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI, a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
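/*
 * Illustration (the exact calls.S contents are not reproduced here; this
 * is an assumed shape): entries look roughly like
 *
 *	CALL(sys_ni_syscall)
 *	CALL(ABI(sys_pread64, sys_oabi_pread64))
 *	CALL(OBSOLETE(sys_time))
 *
 * so the definitions above make the native table pick the first ABI()
 * argument and, on EABI, turn OBSOLETE() entries into sys_ni_syscall,
 * while the OABI table at the end of this file picks the compat variants
 * and keeps the obsolete calls.
 */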

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
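/*
 * Worked example, assuming a 16K-page kernel (PAGE_SHIFT = 14) and that
 * PGOFF_MASK covers the low PAGE_SHIFT-12 bits: off_4k must then be a
 * multiple of 4, and the page offset handed to sys_mmap_pgoff is
 * off_4k >> 2.  A file offset of 128K arrives as off_4k = 32 and becomes
 * pgoff = 8 (8 * 16K = 128K); off_4k = 33 fails the tst above and the
 * call returns -EINVAL.
 */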

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)
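/*
 * Why the shuffling: the kernel itself is built to the EABI convention,
 * where a 64-bit argument has to start in an even register and therefore
 * skips r3, spilling into the fifth/sixth argument slots that vector_swi
 * pushed at sp.  Old-ABI user space instead packs the 64-bit value into
 * the next available registers (r3/r4 here, r1/r2 for the truncate64 and
 * ftruncate64 cases below), so these wrappers copy the halves to where the
 * EABI C functions expect them before branching to the real implementation.
 */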

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif