/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
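
/*
 * Note on the fast path above: the syscall's return value is already live
 * in r0, so "restore_user_regs fast = 1" can skip reloading r0 from the
 * saved frame, and "offset = S_OFF" accounts for the fifth and sixth
 * syscall arguments pushed onto the stack at local_restart.
 */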

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	adrne	lr, BSYM(1f)
	movne	pc, r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
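
/*
 * Note: for a kernel thread, copy_thread() leaves the thread function in r5
 * and its argument in r4, so the movne/adrne sequence above calls it and
 * then falls into the normal return path; for a user task r5 is zero and we
 * go straight to ret_slow_syscall.
 */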

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the size of the system call table is equal to __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x
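
/*
 * calls.S is included more than once: here, with CALL() expanding to an
 * .equ increment so that NR_syscalls ends up as the number of table
 * entries, and again below at sys_call_table (and, with CONFIG_OABI_COMPAT,
 * at sys_oabi_call_table), where CALL(x) expands to ".long x" and actually
 * emits the table.
 */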

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm
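
/*
 * In short: __mcount returns immediately while ftrace_trace_function still
 * points at ftrace_stub and no graph tracer is registered; otherwise it
 * calls the registered tracer with r0 = address of the instrumented
 * function and r1 = that function's caller, then restores everything via
 * mcount_exit.
 */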

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm
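
/*
 * With dynamic ftrace, the "bl ftrace_stub" at ftrace_call and the
 * "mov r0, r0" nop at ftrace_graph_call are placeholders: ftrace rewrites
 * them at runtime (see arch/arm/kernel/ftrace.c) to call the active tracer
 * or to branch to ftrace_graph_caller.
 */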

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
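
/*
 * prepare_ftrace_return() is handed &parent in r0, the instrumented
 * function's address in r1 and the frame pointer in r2; it swaps the saved
 * return address for return_to_handler so the graph tracer also sees the
 * function return.
 */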

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm
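
/*
 * With the APCS prologue shown in the comment further up (mov ip, sp;
 * push {fp, ip, lr, pc}; sub fp, ip, #4), the instrumented function's saved
 * lr ends up at [fp, #-4], which is why both mcount_get_lr and mcount_exit
 * load it from there.
 */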

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site.  Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad #4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save {r0-r3, lr})
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif
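
/*
 * return_to_handler runs in place of the instrumented function's return:
 * ftrace_return_to_handler() hands back the original return address that
 * prepare_ftrace_return() stashed, and r0-r3 are preserved around the call
 * because they may hold the traced function's return value.
 */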

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly Thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif
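
/*
 * For reference, the two calling conventions handled above look roughly
 * like this from user space (illustrative only, not taken from this file):
 *
 *	EABI:	mov	r7, #__NR_xyz		@ number in r7
 *		svc	#0
 *
 *	OABI:	swi	#(__NR_OABI_SYSCALL_BASE + __NR_xyz)
 *
 * which is why the EABI branch needs nothing here, while the ARM-state
 * legacy branches load the SWI instruction word from [lr, #-4] to recover
 * the number.
 */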

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
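
/*
 * 9001 is the fixup label referenced by the __ex_table entries that the
 * USER() macro generates for the "ldr ... [lr, #-4]" accesses above; a
 * fault while reading the SWI instruction lands here.
 */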
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)
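
/*
 * sys_syscall implements the indirect syscall(2) entry: r0 carries the real
 * syscall number and r1-r4 the first four arguments, so everything is
 * shifted down one register and r5/r6 take over the stacked fifth and sixth
 * argument slots.
 */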

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
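
/*
 * The 88 -> 84 fixup above appears to cover the OABI layout of
 * struct statfs64, which is padded to 88 bytes, while the EABI kernel
 * expects the packed 84-byte size.
 */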

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
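
/*
 * Worked example for the PAGE_SHIFT > 12 path: with 8K pages (PAGE_SHIFT=13)
 * an off_4k of 6 is 24K, a whole number of pages, so it is shifted right by
 * one to a pgoff of 3; an odd off_4k would hit PGOFF_MASK and return -EINVAL.
 */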

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)
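
/*
 * For pread64/pwrite64, OABI callers pass the 64-bit offset in r3/r4,
 * whereas the EABI convention aligns it to an even register pair (r4/r5,
 * which the entry code has already pushed onto the stack); copying r3/r4
 * into those stack slots lets the native handlers see the offset where
 * they expect it.
 */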

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif