/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
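	@ Note: with CONFIG_MULTI_IRQ_HANDLER, handle_arch_irq is a word in
	@ .data (defined at the end of this file) that platform code points
	@ at its IRQ handler during boot, so the "ldr pc, [r1]" above is an
	@ indirect call with r0 = pt_regs and lr returning to 9997b.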
49 | ||
ac8b9c1c | 50 | .macro pabt_helper |
8dfe7ac9 | 51 | @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 |
ac8b9c1c | 52 | #ifdef MULTI_PABORT |
0402bece | 53 | ldr ip, .LCprocfns |
ac8b9c1c | 54 | mov lr, pc |
0402bece | 55 | ldr pc, [ip, #PROCESSOR_PABT_FUNC] |
ac8b9c1c RK |
56 | #else |
57 | bl CPU_PABORT_HANDLER | |
58 | #endif | |
59 | .endm | |
60 | ||
61 | .macro dabt_helper | |
62 | ||
63 | @ | |
64 | @ Call the processor-specific abort handler: | |
65 | @ | |
da740472 | 66 | @ r2 - pt_regs |
3e287bec RK |
67 | @ r4 - aborted context pc |
68 | @ r5 - aborted context psr | |
ac8b9c1c RK |
69 | @ |
70 | @ The abort handler must return the aborted address in r0, and | |
71 | @ the fault status register in r1. r9 must be preserved. | |
72 | @ | |
73 | #ifdef MULTI_DABORT | |
0402bece | 74 | ldr ip, .LCprocfns |
ac8b9c1c | 75 | mov lr, pc |
0402bece | 76 | ldr pc, [ip, #PROCESSOR_DABT_FUNC] |
ac8b9c1c RK |
77 | #else |
78 | bl CPU_DABORT_HANDLER | |
79 | #endif | |
80 | .endm | |
81 | ||
785d3cd2 NP |
82 | #ifdef CONFIG_KPROBES |
83 | .section .kprobes.text,"ax",%progbits | |
84 | #else | |
85 | .text | |
86 | #endif | |
87 | ||
1da177e4 LT |
88 | /* |
89 | * Invalid mode handlers | |
90 | */ | |
ccea7a19 RK |
91 | .macro inv_entry, reason |
92 | sub sp, sp, #S_FRAME_SIZE | |
b86040a5 CM |
93 | ARM( stmib sp, {r1 - lr} ) |
94 | THUMB( stmia sp, {r0 - r12} ) | |
95 | THUMB( str sp, [sp, #S_SP] ) | |
96 | THUMB( str lr, [sp, #S_LR] ) | |
1da177e4 LT |
97 | mov r1, #\reason |
98 | .endm | |
99 | ||
100 | __pabt_invalid: | |
ccea7a19 RK |
101 | inv_entry BAD_PREFETCH |
102 | b common_invalid | |
93ed3970 | 103 | ENDPROC(__pabt_invalid) |
1da177e4 LT |
104 | |
105 | __dabt_invalid: | |
ccea7a19 RK |
106 | inv_entry BAD_DATA |
107 | b common_invalid | |
93ed3970 | 108 | ENDPROC(__dabt_invalid) |
1da177e4 LT |
109 | |
110 | __irq_invalid: | |
ccea7a19 RK |
111 | inv_entry BAD_IRQ |
112 | b common_invalid | |
93ed3970 | 113 | ENDPROC(__irq_invalid) |
1da177e4 LT |
114 | |
115 | __und_invalid: | |
ccea7a19 RK |
116 | inv_entry BAD_UNDEFINSTR |
117 | ||
118 | @ | |
119 | @ XXX fall through to common_invalid | |
120 | @ | |
121 | ||
122 | @ | |
123 | @ common_invalid - generic code for failed exception (re-entrant version of handlers) | |
124 | @ | |
125 | common_invalid: | |
126 | zero_fp | |
127 | ||
128 | ldmia r0, {r4 - r6} | |
129 | add r0, sp, #S_PC @ here for interlock avoidance | |
130 | mov r7, #-1 @ "" "" "" "" | |
131 | str r4, [sp] @ save preserved r0 | |
132 | stmia r0, {r5 - r7} @ lr_<exception>, | |
133 | @ cpsr_<exception>, "old_r0" | |
1da177e4 | 134 | |
1da177e4 | 135 | mov r0, sp |
1da177e4 | 136 | b bad_mode |
93ed3970 | 137 | ENDPROC(__und_invalid) |

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
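/*
 * The EABI requires sp to be 64-bit aligned, but an SVC-mode exception
 * may be taken with sp only 32-bit aligned.  The SPFIX() code in
 * svc_entry below tests bit 2 of the adjusted sp and inserts 4 bytes of
 * padding when needed (also accounted for in the saved sp_svc value) so
 * the completed pt_regs frame leaves sp 64-bit aligned.
 */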
148 | ||
c0e7f7ee | 149 | .macro svc_entry, stack_hole=0, trace=1 |
c4c5716e CM |
150 | UNWIND(.fnstart ) |
151 | UNWIND(.save {r0 - pc} ) | |
b86040a5 CM |
152 | sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) |
153 | #ifdef CONFIG_THUMB2_KERNEL | |
154 | SPFIX( str r0, [sp] ) @ temporarily saved | |
155 | SPFIX( mov r0, sp ) | |
156 | SPFIX( tst r0, #4 ) @ test original stack alignment | |
157 | SPFIX( ldr r0, [sp] ) @ restored | |
158 | #else | |
2dede2d8 | 159 | SPFIX( tst sp, #4 ) |
b86040a5 CM |
160 | #endif |
161 | SPFIX( subeq sp, sp, #4 ) | |
162 | stmia sp, {r1 - r12} | |
ccea7a19 | 163 | |
b059bdc3 RK |
164 | ldmia r0, {r3 - r5} |
165 | add r7, sp, #S_SP - 4 @ here for interlock avoidance | |
166 | mov r6, #-1 @ "" "" "" "" | |
167 | add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) | |
168 | SPFIX( addeq r2, r2, #4 ) | |
169 | str r3, [sp, #-4]! @ save the "real" r0 copied | |
ccea7a19 RK |
170 | @ from the exception stack |
171 | ||
b059bdc3 | 172 | mov r3, lr |
1da177e4 LT |
173 | |
174 | @ | |
175 | @ We are now ready to fill in the remaining blanks on the stack: | |
176 | @ | |
b059bdc3 RK |
177 | @ r2 - sp_svc |
178 | @ r3 - lr_svc | |
179 | @ r4 - lr_<exception>, already fixed up for correct return/restart | |
180 | @ r5 - spsr_<exception> | |
181 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) | |
1da177e4 | 182 | @ |
b059bdc3 | 183 | stmia r7, {r2 - r6} |
1da177e4 | 184 | |
c0e7f7ee | 185 | .if \trace |
02fe2845 RK |
186 | #ifdef CONFIG_TRACE_IRQFLAGS |
187 | bl trace_hardirqs_off | |
188 | #endif | |
c0e7f7ee | 189 | .endif |
f2741b78 | 190 | .endm |

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif
232 | ||
15ac49b6 RK |
233 | __und_fault: |
234 | @ Correct the PC such that it is pointing at the instruction | |
235 | @ which caused the fault. If the faulting instruction was ARM | |
236 | @ the PC will be pointing at the next instruction, and have to | |
237 | @ subtract 4. Otherwise, it is Thumb, and the PC will be | |
238 | @ pointing at the second half of the Thumb instruction. We | |
239 | @ have to subtract 2. | |
240 | ldr r2, [r0, #S_PC] | |
241 | sub r2, r2, r1 | |
242 | str r2, [r0, #S_PC] | |
243 | b do_undefinstr | |
244 | ENDPROC(__und_fault) | |
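	@ Worked example: an undefined 32-bit ARM instruction at 0x8000
	@ enters with regs->ARM_pc == 0x8004 and r1 == 4, so the stored PC
	@ becomes 0x8000, the faulting instruction itself; for a 16-bit
	@ Thumb instruction the caller passes r1 == 2 instead.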
245 | ||
1da177e4 LT |
246 | .align 5 |
247 | __und_svc: | |
d30a0c8b NP |
248 | #ifdef CONFIG_KPROBES |
249 | @ If a kprobe is about to simulate a "stmdb sp..." instruction, | |
250 | @ it obviously needs free stack space which then will belong to | |
251 | @ the saved context. | |
252 | svc_entry 64 | |
253 | #else | |
ccea7a19 | 254 | svc_entry |
d30a0c8b | 255 | #endif |
1da177e4 LT |
256 | @ |
257 | @ call emulation code, which returns using r9 if it has emulated | |
258 | @ the instruction, or the more conventional lr if we are to treat | |
259 | @ this as a real undefined instruction | |
260 | @ | |
261 | @ r0 - instruction | |
262 | @ | |
15ac49b6 | 263 | #ifndef CONFIG_THUMB2_KERNEL |
b059bdc3 | 264 | ldr r0, [r4, #-4] |
83e686ea | 265 | #else |
15ac49b6 | 266 | mov r1, #2 |
b059bdc3 | 267 | ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 |
85519189 | 268 | cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 |
15ac49b6 RK |
269 | blo __und_svc_fault |
270 | ldrh r9, [r4] @ bottom 16 bits | |
271 | add r4, r4, #2 | |
272 | str r4, [sp, #S_PC] | |
273 | orr r0, r9, r0, lsl #16 | |
83e686ea | 274 | #endif |
15ac49b6 | 275 | adr r9, BSYM(__und_svc_finish) |
b059bdc3 | 276 | mov r2, r4 |
1da177e4 LT |
277 | bl call_fpe |
278 | ||
15ac49b6 RK |
279 | mov r1, #4 @ PC correction to apply |
280 | __und_svc_fault: | |
1da177e4 | 281 | mov r0, sp @ struct pt_regs *regs |
15ac49b6 | 282 | bl __und_fault |
1da177e4 | 283 | |
15ac49b6 | 284 | __und_svc_finish: |
b059bdc3 RK |
285 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
286 | svc_exit r5 @ return from exception | |
c4c5716e | 287 | UNWIND(.fnend ) |
93ed3970 | 288 | ENDPROC(__und_svc) |

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * should be too.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
367 | ||
c0e7f7ee | 368 | .macro usr_entry, trace=1 |
c4c5716e CM |
369 | UNWIND(.fnstart ) |
370 | UNWIND(.cantunwind ) @ don't unwind the user space | |
ccea7a19 | 371 | sub sp, sp, #S_FRAME_SIZE |
b86040a5 CM |
372 | ARM( stmib sp, {r1 - r12} ) |
373 | THUMB( stmia sp, {r0 - r12} ) | |
ccea7a19 | 374 | |
195b58ad RK |
375 | ATRAP( mrc p15, 0, r7, c1, c0, 0) |
376 | ATRAP( ldr r8, .LCcralign) | |
377 | ||
b059bdc3 | 378 | ldmia r0, {r3 - r5} |
ccea7a19 | 379 | add r0, sp, #S_PC @ here for interlock avoidance |
b059bdc3 | 380 | mov r6, #-1 @ "" "" "" "" |
ccea7a19 | 381 | |
b059bdc3 | 382 | str r3, [sp] @ save the "real" r0 copied |
ccea7a19 | 383 | @ from the exception stack |
1da177e4 | 384 | |
195b58ad RK |
385 | ATRAP( ldr r8, [r8, #0]) |
386 | ||
1da177e4 LT |
387 | @ |
388 | @ We are now ready to fill in the remaining blanks on the stack: | |
389 | @ | |
b059bdc3 RK |
390 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
391 | @ r5 - spsr_<exception> | |
392 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) | |
1da177e4 LT |
393 | @ |
394 | @ Also, separately save sp_usr and lr_usr | |
395 | @ | |
b059bdc3 | 396 | stmia r0, {r4 - r6} |
b86040a5 CM |
397 | ARM( stmdb r0, {sp, lr}^ ) |
398 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) | |
1da177e4 | 399 | |
1da177e4 | 400 | @ Enable the alignment trap while in kernel mode |
195b58ad RK |
401 | ATRAP( teq r8, r7) |
402 | ATRAP( mcrne p15, 0, r8, c1, c0, 0) | |
1da177e4 LT |
403 | |
404 | @ | |
405 | @ Clear FP to mark the first stack frame | |
406 | @ | |
407 | zero_fp | |
f2741b78 | 408 | |
c0e7f7ee | 409 | .if \trace |
f2741b78 RK |
410 | #ifdef CONFIG_IRQSOFF_TRACER |
411 | bl trace_hardirqs_off | |
412 | #endif | |
b0088480 | 413 | ct_user_exit save = 0 |
c0e7f7ee | 414 | .endif |
1da177e4 LT |
415 | .endm |
416 | ||
b49c0f24 | 417 | .macro kuser_cmpxchg_check |
1b16c4bc RK |
418 | #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ |
419 | !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | |
b49c0f24 NP |
420 | #ifndef CONFIG_MMU |
421 | #warning "NPTL on non MMU needs fixing" | |
422 | #else | |
423 | @ Make sure our user space atomic helper is restarted | |
424 | @ if it was interrupted in a critical region. Here we | |
425 | @ perform a quick test inline since it should be false | |
426 | @ 99.9999% of the time. The rest is done out of line. | |
b059bdc3 | 427 | cmp r4, #TASK_SIZE |
40fb79c8 | 428 | blhs kuser_cmpxchg64_fixup |
b49c0f24 NP |
429 | #endif |
430 | #endif | |
431 | .endm | |
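	@ The quick test above works because the kuser helpers live in the
	@ vector page above TASK_SIZE: an interrupted user pc (r4) below
	@ TASK_SIZE cannot be inside a helper, so only the rare high
	@ addresses fall through to the out-of-line fixup, which compares
	@ against the exact critical-section bounds.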
432 | ||
1da177e4 LT |
433 | .align 5 |
434 | __dabt_usr: | |
ccea7a19 | 435 | usr_entry |
b49c0f24 | 436 | kuser_cmpxchg_check |
1da177e4 | 437 | mov r2, sp |
da740472 RK |
438 | dabt_helper |
439 | b ret_from_exception | |
c4c5716e | 440 | UNWIND(.fnend ) |
93ed3970 | 441 | ENDPROC(__dabt_usr) |
1da177e4 LT |
442 | |
443 | .align 5 | |
444 | __irq_usr: | |
ccea7a19 | 445 | usr_entry |
bc089602 | 446 | kuser_cmpxchg_check |
187a51ad | 447 | irq_handler |
1613cc11 | 448 | get_thread_info tsk |
1da177e4 | 449 | mov why, #0 |
9fc2552a | 450 | b ret_to_user_from_irq |
c4c5716e | 451 | UNWIND(.fnend ) |
93ed3970 | 452 | ENDPROC(__irq_usr) |

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)
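@ Decoding note for the "cmp r5, #0xe800 / blo" tests above: a Thumb
@ first halfword whose top five bits are 0b11101, 0b11110 or 0b11111
@ (i.e. >= 0xe800) introduces a 32-bit instruction; anything below that
@ is a complete 16-bit instruction.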

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
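	@ Computed branch into the 16-entry table that follows: r8 holds
	@ (insn & 0x0f00), so "r8, lsr #6" is the coprocessor number times 4.
	@ In ARM state pc reads as the address of the add plus 8, which is
	@ exactly the first table entry (the nop fills the intervening slot),
	@ so each coprocessor selects one word-sized entry.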
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
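/*
 * Illustrative user-space view (a sketch following the document cited
 * above, not part of the helper code itself): each helper sits at a
 * fixed address counting down from 0xffff0fff, so C code typically
 * declares them as function pointers, e.g.:
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 */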
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm
819 | ||
5b43e7a3 RK |
820 | .macro kuser_pad, sym, size |
821 | .if (. - \sym) & 3 | |
822 | .rept 4 - (. - \sym) & 3 | |
823 | .byte 0 | |
824 | .endr | |
825 | .endif | |
826 | .rept (\size - (. - \sym)) / 4 | |
827 | .word 0xe7fddef1 | |
828 | .endr | |
829 | .endm | |
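	@ The 0xe7fddef1 fill word lies in the architecturally undefined
	@ instruction space, so any stray execution into the padding faults
	@ rather than running whatever bytes happen to be there.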
830 | ||
f6f91b0d | 831 | #ifdef CONFIG_KUSER_HELPERS |
2d2669b6 NP |
832 | .align 5 |
833 | .globl __kuser_helper_start | |
834 | __kuser_helper_start: | |
835 | ||
7c612bfd | 836 | /* |
40fb79c8 NP |
837 | * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular |
838 | * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point. | |
7c612bfd NP |
839 | */ |
840 | ||
40fb79c8 NP |
841 | __kuser_cmpxchg64: @ 0xffff0f60 |
842 | ||
843 | #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | |
844 | ||
845 | /* | |
846 | * Poor you. No fast solution possible... | |
847 | * The kernel itself must perform the operation. | |
848 | * A special ghost syscall is used for that (see traps.c). | |
849 | */ | |
850 | stmfd sp!, {r7, lr} | |
851 | ldr r7, 1f @ it's 20 bits | |
852 | swi __ARM_NR_cmpxchg64 | |
853 | ldmfd sp!, {r7, pc} | |
854 | 1: .word __ARM_NR_cmpxchg64 | |
855 | ||
856 | #elif defined(CONFIG_CPU_32v6K) | |
857 | ||
858 | stmfd sp!, {r4, r5, r6, r7} | |
859 | ldrd r4, r5, [r0] @ load old val | |
860 | ldrd r6, r7, [r1] @ load new val | |
861 | smp_dmb arm | |
862 | 1: ldrexd r0, r1, [r2] @ load current val | |
863 | eors r3, r0, r4 @ compare with oldval (1) | |
864 | eoreqs r3, r1, r5 @ compare with oldval (2) | |
865 | strexdeq r3, r6, r7, [r2] @ store newval if eq | |
866 | teqeq r3, #1 @ success? | |
867 | beq 1b @ if no then retry | |
ed3768a8 | 868 | smp_dmb arm |
40fb79c8 NP |
869 | rsbs r0, r3, #0 @ set returned val and C flag |
870 | ldmfd sp!, {r4, r5, r6, r7} | |
5a97d0ae | 871 | usr_ret lr |

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous
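	@ Range-check arithmetic above: "subs r8, r4, r7" sets C when the
	@ interrupted pc is at or above 1b, and "rsbcss r8, r8, #(2b - 1b)"
	@ (executed only then) keeps C set when pc - 1b <= 2b - 1b; "strcs"
	@ therefore rewinds the saved user pc to 1b exactly when it lies
	@ within the critical section, restarting the whole sequence.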
910 | ||
911 | #else | |
912 | #warning "NPTL on non MMU needs fixing" | |
913 | mov r0, #-1 | |
914 | adds r0, r0, #0 | |
ba9b5d76 | 915 | usr_ret lr |
40fb79c8 NP |
916 | #endif |
917 | ||
918 | #else | |
919 | #error "incoherent kernel configuration" | |
920 | #endif | |
921 | ||
5b43e7a3 | 922 | kuser_pad __kuser_cmpxchg64, 64 |

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
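@ The "and lr, lr, #0x0f" above turns the spsr mode field into a table
@ index: the 32-bit modes are 0x10 + index, so USR_32 (0x10) selects
@ entry 0, FIQ_32 entry 1, IRQ_32 entry 2 and SVC_32 entry 3, matching
@ the 16-word branch tables following each vector_stub invocation below.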

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn
1187 | ||
1da177e4 | 1188 | /*============================================================================= |
c0e7f7ee | 1189 | * FIQ "NMI" handler |
1da177e4 | 1190 | *----------------------------------------------------------------------------- |
c0e7f7ee DT |
1191 | * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86 |
1192 | * systems. | |
1da177e4 | 1193 | */ |
c0e7f7ee DT |
1194 | vector_stub fiq, FIQ_MODE, 4 |
1195 | ||
1196 | .long __fiq_usr @ 0 (USR_26 / USR_32) | |
1197 | .long __fiq_svc @ 1 (FIQ_26 / FIQ_32) | |
1198 | .long __fiq_svc @ 2 (IRQ_26 / IRQ_32) | |
1199 | .long __fiq_svc @ 3 (SVC_26 / SVC_32) | |
1200 | .long __fiq_svc @ 4 | |
1201 | .long __fiq_svc @ 5 | |
1202 | .long __fiq_svc @ 6 | |
1203 | .long __fiq_abt @ 7 | |
1204 | .long __fiq_svc @ 8 | |
1205 | .long __fiq_svc @ 9 | |
1206 | .long __fiq_svc @ a | |
1207 | .long __fiq_svc @ b | |
1208 | .long __fiq_svc @ c | |
1209 | .long __fiq_svc @ d | |
1210 | .long __fiq_svc @ e | |
1211 | .long __fiq_svc @ f | |

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
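@ The vector page is mapped at 0xffff0000 with the stubs page right after
@ it, so plain branches reach the stubs, while the SWI vector loads its
@ handler address from the first word of the stubs page (vector_swi, see
@ __stubs_start above) via the "W(ldr) pc, __vectors_start + 0x1000".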

	.data

	.globl	cr_alignment
cr_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif