/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	ldr	r1, [r1]
	adr	lr, BSYM(9997f)
	teq	r1, #0
	movne	pc, r1
#endif
	arch_irq_handler_default
9997:
	.endm
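/*
 * Conceptually, with CONFIG_MULTI_IRQ_HANDLER the macro above dispatches
 * like this C sketch (handle_arch_irq is installed by the platform at
 * boot; the default path comes from mach/entry-macro.S):
 *
 *	if (handle_arch_irq)
 *		handle_arch_irq(regs);		// regs passed in r0
 *	else
 *		arch_irq_handler_default();
 */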

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

	@
	@ common_invalid - generic code for failed exception (re-entrant version of handlers)
	@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif
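/*
 * svc_preempt is the assembly form of this C loop (IRQ enabling and
 * disabling happens inside preempt_schedule_irq() itself):
 *
 *	do {
 *		preempt_schedule_irq();
 *	} while (test_thread_flag(TIF_NEED_RESCHED));
 */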

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_unknown
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)) */
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)
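/*
 * Both Thumb paths above size the trapped instruction from its first
 * halfword, per the ARM ARM encoding rules.  As a C sketch:
 *
 *	int thumb_insn_is_32bit(unsigned short hw1)
 *	{
 *		return hw1 >= 0xe800;	// 0b11101/0b11110/0b11111 prefixes
 *	}
 *
 * Anything below 0xe800 is a complete 16-bit instruction; the user path
 * sends those straight to __und_usr_unknown.
 */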

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
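/*
 * Each __ex_table entry pairs a potentially-faulting instruction address
 * with its fixup, i.e. conceptually:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	// the ldr*t above (labels 1, 2, 3)
 *		unsigned long fixup;	// label 4: mov pc, r9
 *	};
 *
 * When one of those loads faults, the abort code looks up the faulting PC
 * in this table and resumes at the fixup, which here returns via r9.
 */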

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
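/*
 * A C sketch of the decode performed below (field layout per the ARM ARM;
 * coprocessor instructions live in the space with bits 27:26 both set,
 * with the ARM6/7 SWI quirk handled separately):
 *
 *	int cp_num(u32 insn)
 *	{
 *		if ((insn & 0x0c000000) != 0x0c000000)
 *			return -1;		// not a coprocessor insn
 *		return (insn >> 8) & 0xf;	// CP number, bits 11:8
 *	}
 */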
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
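/*
 * The matching loop at call_fpe walks these {mask, opcode} pairs with a
 * zero mask as terminator.  Conceptually:
 *
 *	for (const u32 *p = opcodes; p[0] != 0; p += 2)
 *		if ((insn & p[0]) == p[1])
 *			return neon;	// mark CP#10/11 used, go to do_vfp
 *	// no match: fall through to the ordinary coprocessor decode
 */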

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
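/*
 * For illustration, user space reaches these helpers by calling their
 * fixed addresses directly, e.g. for __kuser_cmpxchg (see the referenced
 * document for the full contract):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));  // 0 on success
 *		return new;
 *	}
 */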
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	bx	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr
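/*
 * User-space view of this helper, as documented in
 * Documentation/arm/kernel_user_helpers.txt:
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * The call returns zero (with the C flag set) if *ptr was atomically
 * changed from *oldval to *newval, and non-zero otherwise.
 */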

	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr
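/*
 * User-space view (per Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 *
 * Calling __kuser_dmb() gives user space a full memory barrier without
 * knowing whether this CPU wants a CP15 operation or the DMB instruction.
 */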

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
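/*
 * User-space view (per Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();	// value set via the set_tls syscall
 */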

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
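/*
 * Each helper occupies one 32-byte slot, hence the ">> 5" above.  User
 * space can require a minimum helper set by reading this word, e.g.:
 *
 *	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
 *
 *	if (__kuser_helper_version < 2)	// __kuser_cmpxchg needs version 2
 *		use_fallback();
 */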

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
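/*
 * The tail of the macro above indexes the branch table that follows each
 * stub by the low nibble of the mode the CPU was in, i.e. conceptually:
 *
 *	handler = table[spsr & 0xf];	// 0 = USR, 2 = IRQ, 3 = SVC, ...
 *	goto *handler;			// movs pc, lr also restores the CPSR
 *
 * which is why each dispatch table below has exactly 16 entries.
 */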

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
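/*
 * The vectors below are assembled at __vectors_start but executed from
 * the vector page, to which both blocks are copied (see traps.c): the
 * vectors land at the page base and the stubs 0x200 further on.  Since
 * branches are PC-relative, each target needs the same displacement it
 * will have after copying:
 *
 *	target = vector_xxx + stubs_offset
 *	       = vector_xxx + (__vectors_start + 0x200 - __stubs_start)
 *
 * which places the branch destination exactly where the copied stub sits.
 */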

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif