/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif
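/*
 * Platforms that select CONFIG_NEED_RET_TO_USER supply their own
 * arch_ret_to_user in <mach/entry-macro.S>; everyone else gets the
 * empty stub above.  \tmp1 and \tmp2 are scratch registers the macro
 * is free to clobber.
 */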

#include "entry-header.S"
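/*
 * entry-header.S provides the register aliases used throughout this
 * file: scno (r7) holds the syscall number, tbl (r8) the syscall table
 * pointer, why (r8) flags whether this is a real syscall, and tsk (r9)
 * points at the current thread_info.
 */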


	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
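/*
 * The return value handling above follows do_work_pending() in
 * arch/arm/kernel/signal.c, roughly:
 *
 *	asmlinkage int
 *	do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
 *			int syscall);
 *
 * A zero return means all work is done and we can leave the kernel.
 * A non-zero return asks for a syscall restart without returning to
 * user space: a negative value rewrites scno to restart_syscall first,
 * while a positive value replays the original syscall, reloading
 * r0 - r6 from the saved frame either way.
 */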

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
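/*
 * copy_thread() in arch/arm/kernel/process.c points every new task
 * here.  For a user-space child r5 is zero and we fall straight into
 * the slow return path; for a kernel thread, copy_thread() stashes the
 * thread function in r5 and its argument in r4, so we call r5(r4) and
 * take the slow return path once it returns.
 */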
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
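/*
 * calls.S is included twice: the first pass (below) defines CALL() to
 * bump a counter, giving us the number of entries in the table; the
 * second pass, at sys_call_table further down, redefines CALL() to
 * emit each entry as a pointer.
 */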

.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the size of the system call table matches __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
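	/*
	 * At this point the stack holds a full struct pt_regs for the
	 * caller: r0-r12, the user sp and lr, the return pc, the saved
	 * cpsr, and the original r0 (kept separately so a restarted
	 * syscall still has its first argument after the return value
	 * has overwritten r0).
	 */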
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

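	/*
	 * We are done touching the user-space SWI instruction, so drop
	 * kernel access to user memory again (on entry from user space
	 * it is still enabled, e.g. under software PAN emulation).  tbl
	 * merely serves as a scratch register for the macro here.
	 */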
	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
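	/*
	 * Worked example, assuming the usual __NR_OABI_SYSCALL_BASE of
	 * 0x900000: an OABI "swi 0x900005" (open) leaves 0x900005 in r10
	 * after the bics, and the eor with the base turns that into a
	 * table index of 5.  An EABI "swi 0" leaves zero, so scno keeps
	 * the number user space loaded into r7.
	 */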

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
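	/*
	 * lr was pre-loaded above, so the sys_* routine returns straight
	 * into ret_fast_syscall.  If scno was out of range, the ldrcc is
	 * skipped and we fall through to sort out private ARM syscalls
	 * from plain nonsense.
	 */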

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
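	/*
	 * Numbers at or above __ARM_NR_BASE are ARM-private syscalls
	 * (cacheflush, set_tls and friends) handled by arm_syscall();
	 * anything else that lands here is simply not implemented.
	 */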

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall
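	/*
	 * syscall_trace_enter() returns the (possibly rewritten) syscall
	 * number; a tracer such as ptrace or seccomp can change it, or
	 * set it to -1 to skip the call entirely, in which case we head
	 * straight back out through the slow return path.
	 */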

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)
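/*
 * sys_syscall implements the old "indirect" syscall(2) convention:
 * user space puts the real syscall number in r0 and the arguments one
 * register later than usual, so after the range check everything is
 * shifted down a register (with r5/r6 moved into the stacked fifth and
 * sixth argument slots) before dispatching through the table.  Calling
 * syscall(__NR_syscall) recursively fails the cmp against __NR_syscall
 * and ends up in sys_ni_syscall.
 */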

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
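/*
 * The statfs64 wrappers paper over an ABI padding difference: user
 * space built against an ABI where struct statfs64 is padded to 88
 * bytes passes that as the size argument, while the kernel's packed
 * definition is 84 bytes, so 88 is quietly rewritten to 84 before the
 * size check in the C code.
 */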

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't represent
 * the requested offset in whole pages, we return -EINVAL.
 */
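/*
 * For example, with 16K pages (PAGE_SHIFT == 14), off_4k must be a
 * multiple of 4: the tst against PGOFF_MASK rejects anything else, and
 * the lsr #2 converts units of 4K into units of one page before the
 * value is handed to sys_mmap_pgoff() in the stacked sixth argument
 * slot.
 */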
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		ret	lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
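/*
 * EABI aligns 64-bit arguments to even register pairs while OABI packs
 * them wherever they fall, so the raw registers from an OABI caller do
 * not line up with what the EABI-compiled sys_* routines expect.  For
 * pread64(fd, buf, count, pos), an OABI caller puts the 64-bit pos in
 * r3/r4, whereas the EABI kernel expects it in the stacked argument
 * slots - hence the stmia of {r3, r4} to sp in the wrapper below.
 */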

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
