/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls. Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
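	// Note: context_tracking_user_exit()/_enter() are out-of-line C
	// calls and may clobber the caller-saved registers (x0-x18 under
	// the AAPCS64); the syscall variant of ct_user_exit therefore
	// reloads the syscall arguments x0-x7 from the saved pt_regs frame.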
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
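	// S_FRAME_SIZE and the S_* offsets above are generated into
	// asm-offsets.h from struct pt_regs, so the frame kernel_entry
	// builds on the stack is exactly the struct pt_regs that the C
	// handlers later receive through their 'regs' argument.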
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel or user
	.endm
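	// eret atomically restores PC from ELR_EL1 and PSTATE from
	// SPSR_EL1, which is why both are written back above before the
	// exception frame is unwound.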
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
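	// Kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned,
	// with struct thread_info at the low end, so clearing the low
	// bits of SP yields the current thread_info pointer.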
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
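// .req only creates assembler-level aliases: every use of scno below
// assembles to a plain reference to x26, with no extra instructions.
// x25-x28 are callee-saved, so the aliases survive calls into C code.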
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text
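	// handle_arch_irq (defined at the bottom of this file) is a
	// pointer-sized literal filled in at boot by the interrupt
	// controller driver (e.g. with gic_handle_irq); the handler is
	// then invoked with x0 = struct pt_regs *.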
/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
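// VBAR_EL1 points at this table, which must be 2KB aligned (.align 11);
// each ventry expands to a 0x80-byte slot, giving the architected
// 16-entry layout: {Sync, IRQ, FIQ, SError} x {EL1t, EL1h, 64-bit EL0,
// 32-bit EL0}.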
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm
el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
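// bad_mode() in traps.c reports the unexpected exception (vector,
// reason code and ESR value) and treats it as fatal; none of these
// stubs should ever run on sane hardware.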
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
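	// ESR_EL1[31:26] is the Exception Class (EC); the final b.ge
	// works because the EC encodings for EL1 debug exceptions
	// (breakpoint, step, watchpoint, BRK) all sort at or above
	// ESR_EL1_EC_BREAKPT_EL1.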
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1			// faulting address
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
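	// preempt_schedule_irq() toggles interrupts itself; TI_FLAGS is
	// re-read after it returns because a new reschedule request may
	// have arrived while interrupts were briefly enabled.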
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
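	// Trapped AArch32 CP14/CP15 coprocessor accesses are funnelled to
	// el0_undef: do_undefinstr() treats them as undefined instructions,
	// which typically ends in SIGILL unless an emulation hook claims
	// the access.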
	.align	6
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
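	// The AArch32 EABI passes the syscall number in r7 (w7 in
	// el0_svc_compat above) and arguments in r0-r6, whereas native
	// AArch64 tasks use w8; both paths meet at el0_svc_naked below.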
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)		// clear the address tag
	mov	x1, x25				// ESR
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort
	b	ret_to_user
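	// With Top Byte Ignore (TBI) enabled for EL0, bits [63:56] of a
	// user pointer may carry a tag, so they are masked off before the
	// fault address is handed to do_mem_abort().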
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort
	b	ret_to_user
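	// ISS bit 24 is RES0 for instruction aborts, so it is borrowed to
	// tell the shared do_mem_abort()/do_page_fault() path that this
	// was an execute fault rather than a data access.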
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25				// ESR
	mov	x1, sp				// struct pt_regs
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25				// ESR
	mov	x1, sp				// struct pt_regs
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *  x0 = previous task_struct (must be preserved across the switch)
 *  x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
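// struct cpu_context inside thread_struct holds exactly this set
// (x19-x28, fp, sp, pc); everything else is caller-saved under the
// AAPCS64, so the C caller of cpu_switch_to() has already spilled any
// values it needs.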
/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1
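	// _TIF_WORK_MASK bundles the "attention needed before returning
	// to user" flags (need_resched, pending signal, notify-resume),
	// so a single and/cbnz picks between the fast exit and the slow
	// work_pending path.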
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule
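	// do_notify_resume() delivers signals and runs TIF_NOTIFY_RESUME
	// callbacks with interrupts enabled; control then loops back
	// through ret_to_user, which rechecks TI_FLAGS with interrupts
	// masked again.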
617 * "slow" syscall return path.
620 disable_irq // disable interrupts
621 ldr x1, [tsk, #TI_FLAGS]
622 and x2, x1, #_TIF_WORK_MASK
623 cbnz x2, work_pending
624 enable_step_tsk x1, x2
626 kernel_exit 0, ret = 0
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
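// copy_thread() seeds the child's saved cpu_context: for kernel
// threads x19 holds the thread function and x20 its argument, while
// for user forks x19 is zero and the child drops straight into
// ret_to_user.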
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
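	// Dispatch notes: scno was zero-extended by uxtw, entries are
	// 8-byte pointers (hence 'lsl #3'), and the bare adrp works
	// because sys_call_table is page-aligned, so its :lo12: offset is
	// zero.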
	/*
	 * This is the really slow path. We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
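	// syscall_trace_enter() returns the possibly-rewritten syscall
	// number (a ptracer may change it), and the argument registers
	// are reloaded from pt_regs because the tracer may have modified
	// those as well.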
__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
ENTRY(handle_arch_irq)
	.quad	0				// patched at boot with the root IRQ handler