/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
#include <asm/thread_notify.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
        .macro  irq_handler
        get_irqnr_preamble r5, lr
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adrne   lr, 1b
        bne     asm_do_IRQ
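        @ (adrne lr, 1b makes asm_do_IRQ return to the polling loop
        @ above, so all pending interrupts are handled before leaving)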

#ifdef CONFIG_SMP
        /*
         * XXX
         *
         * this macro assumes that irqstat (r6) and base (r5) are
         * preserved from get_irqnr_and_base above
         */
        test_for_ipi r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, 1b
        bne     do_IPI

#ifdef CONFIG_LOCAL_TIMERS
        test_for_ltirq r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, 1b
        bne     do_local_timer
#endif
#endif

        .endm

#ifdef CONFIG_KPROBES
        .section        .kprobes.text,"ax",%progbits
#else
        .text
#endif

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE
        stmib   sp, {r1 - lr}
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        b       bad_mode

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
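
/*
 * Under EABI the stack must be 64-bit aligned when calling C code, so
 * the SPFIX() sequences in svc_entry below align sp to 8 bytes on
 * entry and compensate when recording the original sp in the frame.
 */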

        .macro  svc_entry, stack_hole=0
        sub     sp, sp, #(S_FRAME_SIZE + \stack_hole)
 SPFIX( tst     sp, #4          )
 SPFIX( bicne   sp, sp, #4      )
        stmib   sp, {r1 - r12}

        ldmia   r0, {r1 - r3}
        add     r5, sp, #S_SP           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #(S_FRAME_SIZE + \stack_hole)
 SPFIX( addne   r0, r0, #4      )
        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_svc
        @  r1 - lr_svc
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
        .endm

        .align  5
__dabt_svc:
        svc_entry

        @
        @ get ready to re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
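                                        @ (mov lr, pc sets lr to the mov's
                                        @ address + 8, so the handler
                                        @ returns to the insn after the ldr)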
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ set desired IRQ state, then call main handler
        @
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .align  5
__irq_svc:
        svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_FLAGS]    @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
preempt_return:
        ldr     r0, [tsk, #TI_PREEMPT]  @ read preempt value
        str     r8, [tsk, #TI_PREEMPT]  @ restore preempt count
        teq     r0, r7
        strne   r0, [r0, -r0]           @ bug()
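                                        @ (r0 - r0 is address 0, so the
                                        @ str faults if the preempt count
                                        @ was left unbalanced)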
#endif
        ldr     r0, [sp, #S_PSR]        @ irqs are already disabled
        msr     spsr_cxsf, r0
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     r0, #PSR_I_BIT
        bleq    trace_hardirqs_on
#endif
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        teq     r8, #0                  @ was preempt count = 0?
        ldreq   r6, .LCirq_stat
        movne   pc, lr                  @ no
        ldr     r0, [r6, #4]            @ local_irq_count
        ldr     r1, [r6, #8]            @ local_bh_count
        adds    r0, r0, r1
        movne   pc, lr
        mov     r7, #0                  @ preempt_schedule_irq
        str     r7, [tsk, #TI_PREEMPT]  @ expects preempt_count == 0
1:      bl      preempt_schedule_irq    @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]    @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return          @ go again
        b       1b
#endif

        .align  5
__und_svc:
#ifdef CONFIG_KPROBES
        @ If a kprobe is about to simulate a "stmdb sp..." instruction,
        @ it obviously needs free stack space which then will belong to
        @ the saved context.
        svc_entry 64
#else
        svc_entry
#endif

        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
        ldr     r0, [r2, #-4]
        adr     r9, 1f
        bl      call_fpe

        mov     r0, sp                  @ struct pt_regs *regs
        bl      do_undefinstr

        @
        @ IRQs off again before pulling preserved data off the stack
        @
1:      disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     lr, [sp, #S_PSR]        @ Get SVC cpsr
        msr     spsr_cxsf, lr
        ldmia   sp, {r0 - pc}^          @ Restore SVC registers

        .align  5
__pabt_svc:
        svc_entry

        @
        @ re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT
        msr     cpsr_c, r9

        @
        @ set args, then call main handler
        @
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack
        @
        mov     r0, r2                  @ address (pc)
        mov     r1, sp                  @ regs
        bl      do_PrefetchAbort        @ call abort handler

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .align  5
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
        .word   irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must
 * be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

        .macro  usr_entry
        sub     sp, sp, #S_FRAME_SIZE
        stmib   sp, {r1 - r12}

        ldmia   r0, {r1 - r3}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""

        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r2 - r4}
        stmdb   r0, {sp, lr}^

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm

        .macro  kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
        @ Make sure our user space atomic helper is restarted
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
        cmp     r2, #TASK_SIZE
        blhs    kuser_cmpxchg_fixup
#endif
#endif
        .endm

        .align  5
__dabt_usr:
        usr_entry
        kuser_cmpxchg_check

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ IRQs on, then call the main handler
        @
        enable_irq
        mov     r2, sp
        adr     lr, ret_from_exception
        b       do_DataAbort

        .align  5
__irq_usr:
        usr_entry
        kuser_cmpxchg_check

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]
        teq     r0, r7
        strne   r0, [r0, -r0]
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif

        mov     why, #0
        b       ret_to_user

        .ltorg

        .align  5
__und_usr:
        usr_entry

        tst     r3, #PSR_T_BIT          @ Thumb mode?
        bne     __und_usr_unknown       @ ignore FP
        sub     r4, r2, #4

        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        @
        @  r0 - instruction
        @
        adr     r9, ret_from_exception
        adr     lr, __und_usr_unknown
1:      ldrt    r0, [r4]
        @
        @ fallthrough to call_fpe
        @

/*
 * The out of line fixup for the ldrt above.
 */
        .section .fixup, "ax"
2:      mov     pc, r9
        .previous
        .section __ex_table,"a"
        .long   1b, 2b
        .previous
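
/*
 * The __ex_table entry pairs the address of the user space load at 1b
 * with the fixup at 2b: if the ldrt faults, the page fault handler
 * resumes execution at 2b, which simply returns via r9, i.e. through
 * ret_from_exception.
 */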

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
call_fpe:
#ifdef CONFIG_NEON
        adr     r6, .LCneon_opcodes
2:
        ldr     r7, [r6], #4            @ mask value
        cmp     r7, #0                  @ end mask?
        beq     1f
        and     r8, r0, r7
        ldr     r7, [r6], #4            @ opcode bits matching in mask
        cmp     r8, r7                  @ NEON instruction?
        bne     2b
        get_thread_info r10
        mov     r7, #1
        strb    r7, [r10, #TI_USED_CP + 10]     @ mark CP#10 as used
        strb    r7, [r10, #TI_USED_CP + 11]     @ mark CP#11 as used
        b       do_vfp                  @ let VFP handler handle this
1:
#endif
        tst     r0, #0x08000000         @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000     @ mask out op-code bits
        teqne   r8, #0x0f000000         @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10             @ get current thread
        and     r8, r0, #0x00000f00     @ mask out CP number
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]    @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)       @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
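        @ Computed branch: r8 holds the CP number << 8, so "lsr #6"
        @ scales it to a word offset.  pc reads as the address of the
        @ add + 8, which is where the table below starts; the mov r0, r0
        @ nop fills the intervening slot.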
        add     pc, pc, r8, lsr #6
        mov     r0, r0

        mov     pc, lr                  @ CP#0
        b       do_fpe                  @ CP#1 (FPE)
        b       do_fpe                  @ CP#2 (FPE)
        mov     pc, lr                  @ CP#3
#ifdef CONFIG_CRUNCH
        b       crunch_task_enable      @ CP#4 (MaverickCrunch)
        b       crunch_task_enable      @ CP#5 (MaverickCrunch)
        b       crunch_task_enable      @ CP#6 (MaverickCrunch)
#else
        mov     pc, lr                  @ CP#4
        mov     pc, lr                  @ CP#5
        mov     pc, lr                  @ CP#6
#endif
        mov     pc, lr                  @ CP#7
        mov     pc, lr                  @ CP#8
        mov     pc, lr                  @ CP#9
#ifdef CONFIG_VFP
        b       do_vfp                  @ CP#10 (VFP)
        b       do_vfp                  @ CP#11 (VFP)
#else
        mov     pc, lr                  @ CP#10 (VFP)
        mov     pc, lr                  @ CP#11 (VFP)
#endif
        mov     pc, lr                  @ CP#12
        mov     pc, lr                  @ CP#13
        mov     pc, lr                  @ CP#14 (Debug)
        mov     pc, lr                  @ CP#15 (Control)

#ifdef CONFIG_NEON
        .align  6

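@ (mask, opcode) pairs tried in order by the matching loop in call_fpe:
@ an instruction is treated as NEON when (insn & mask) == opcode.  A
@ zero mask terminates the table.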
.LCneon_opcodes:
        .word   0xfe000000              @ mask
        .word   0xf2000000              @ opcode

        .word   0xff100000              @ mask
        .word   0xf4000000              @ opcode

        .word   0x00000000              @ mask
        .word   0x00000000              @ opcode
#endif

do_fpe:
        enable_irq
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE   @ r10 = workspace
        ldr     pc, [r4]                @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .data
ENTRY(fp_enter)
        .word   no_fp
        .previous

no_fp:  mov     pc, lr

__und_usr_unknown:
        mov     r0, sp
        adr     lr, ret_from_exception
        b       do_undefinstr

        .align  5
__pabt_usr:
        usr_entry

        enable_irq                      @ Enable interrupts
        mov     r0, r2                  @ address (pc)
        mov     r1, sp                  @ regs
        bl      do_PrefetchAbort        @ call abort handler
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack
#ifdef CONFIG_MMU
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
        clrex
#else
        strex   r5, r4, [ip]            @ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_HAS_TLS_REG)
        mcr     p15, 0, r3, c13, c0, 3  @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
        mov     r4, #0xffff0fff
        str     r3, [r4, #-15]          @ TLS val at 0xffff0ff0
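                                        @ (0xffff0ff0 itself is not an
                                        @ encodable ARM immediate, hence
                                        @ the 0xffff0fff - 15 detour)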
#endif
#ifdef CONFIG_MMU
        mcr     p15, 0, r6, c3, c0, 0   @ Set domain register
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
        mov     r0, r5
        ldmia   r4, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
670 | |
671 | __INIT | |
2d2669b6 NP |
672 | |
673 | /* | |
674 | * User helpers. | |
675 | * | |
676 | * These are segment of kernel provided user code reachable from user space | |
677 | * at a fixed address in kernel memory. This is used to provide user space | |
678 | * with some operations which require kernel help because of unimplemented | |
679 | * native feature and/or instructions in many ARM CPUs. The idea is for | |
680 | * this code to be executed directly in user mode for best efficiency but | |
681 | * which is too intimate with the kernel counter part to be left to user | |
682 | * libraries. In fact this code might even differ from one CPU to another | |
683 | * depending on the available instruction set and restrictions like on | |
684 | * SMP systems. In other words, the kernel reserves the right to change | |
685 | * this code as needed without warning. Only the entry points and their | |
686 | * results are guaranteed to be stable. | |
687 | * | |
688 | * Each segment is 32-byte aligned and will be moved to the top of the high | |
689 | * vector page. New segments (if ever needed) must be added in front of | |
690 | * existing ones. This mechanism should be used only for things that are | |
691 | * really small and justified, and not be abused freely. | |
692 | * | |
693 | * User space is expected to implement those things inline when optimizing | |
694 | * for a processor that has the necessary native support, but only if such | |
695 | * resulting binaries are already to be incompatible with earlier ARM | |
696 | * processors due to the use of unsupported instructions other than what | |
697 | * is provided here. In other words don't make binaries unable to run on | |
698 | * earlier processors just for the sake of not using these kernel helpers | |
699 | * if your compiled code is not going to use the new instructions for other | |
700 | * purpose. | |
701 | */ | |
702 | ||
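/*
 * usr_ret: return to user space, using bx for correct ARM/Thumb
 * interworking when the kernel is built with Thumb support.
 */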
        .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
        bx      \reg
#else
        mov     pc, \reg
#endif
        .endm

        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr", "cc" )
 */

__kuser_memory_barrier:                         @ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
        mcr     p15, 0, r0, c7, c10, 5  @ dmb
#endif
        usr_ret lr

        .align  5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:                                @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        /*
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
         */
        stmfd   sp!, {r7, lr}
        mov     r7, #0xff00             @ 0xfff0 into r7 for EABI
        orr     r7, r7, #0xf0
        swi     #0x9ffff0
        ldmfd   sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle
         * of the critical sequence.  To prevent this, code is added to
         * the IRQ and data abort exception handlers to set the pc back
         * to the beginning of the critical section if it is found to be
         * within that critical section (see kuser_cmpxchg_fixup).
         */
1:      ldr     r3, [r2]                @ load current val
        subs    r3, r3, r0              @ compare with oldval
2:      streq   r1, [r2]                @ store newval if eq
        rsbs    r0, r3, #0              @ set return val and C flag
        usr_ret lr

        .text
kuser_cmpxchg_fixup:
        @ Called from kuser_cmpxchg_check macro.
        @ r2 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r2, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        mov     pc, lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else

#ifdef CONFIG_SMP
        mcr     p15, 0, r0, c7, c10, 5  @ dmb
#endif
1:      ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        teqeq   r3, #1
        beq     1b
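        @ (strexeq leaves r3 = 1 if the store failed, so the teqeq/beq
        @ pair retries the whole sequence until it succeeds)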
        rsbs    r0, r3, #0
        /* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
        b       __kuser_memory_barrier
#else
        usr_ret lr
#endif

#endif

        .align  5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr", "cc" ); \
 *	   __val; })
 */

__kuser_get_tls:                                @ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
        ldr     r0, [pc, #(16 - 8)]     @ TLS stored at 0xffff0ff0
#else
        mrc     p15, 0, r0, c13, c0, 3  @ read TLS register
#endif
        usr_ret lr

        .rep    5
        .word   0                       @ pad up to __kuser_helper_version
        .endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
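 *
 * For example, a caller could check for __kernel_cmpxchg before using
 * it (an illustrative sketch; it assumes helpers are counted from the
 * top of the page, making __kernel_cmpxchg the second helper, and
 * use_fallback_cmpxchg is a hypothetical fallback path):
 *
 *	if (__kernel_helper_version < 2)
 *		use_fallback_cmpxchg();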
 */

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @
        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        @ (parent CPSR)
        @
        stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
        mov     r0, sp
1da177e4 | 995 | ldr lr, [pc, lr, lsl #2] |
ccea7a19 | 996 | movs pc, lr @ branch to handler in SVC mode |
1da177e4 LT |
997 | .endm |

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr               @  0  (USR_26 / USR_32)
        .long   __irq_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc               @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid           @  4
        .long   __irq_invalid           @  5
        .long   __irq_invalid           @  6
        .long   __irq_invalid           @  7
        .long   __irq_invalid           @  8
        .long   __irq_invalid           @  9
        .long   __irq_invalid           @  a
        .long   __irq_invalid           @  b
        .long   __irq_invalid           @  c
        .long   __irq_invalid           @  d
        .long   __irq_invalid           @  e
        .long   __irq_invalid           @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr              @  0  (USR_26 / USR_32)
        .long   __dabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid          @  4
        .long   __dabt_invalid          @  5
        .long   __dabt_invalid          @  6
        .long   __dabt_invalid          @  7
        .long   __dabt_invalid          @  8
        .long   __dabt_invalid          @  9
        .long   __dabt_invalid          @  a
        .long   __dabt_invalid          @  b
        .long   __dabt_invalid          @  c
        .long   __dabt_invalid          @  d
        .long   __dabt_invalid          @  e
        .long   __dabt_invalid          @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr              @  0  (USR_26 / USR_32)
        .long   __pabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid          @  4
        .long   __pabt_invalid          @  5
        .long   __pabt_invalid          @  6
        .long   __pabt_invalid          @  7
        .long   __pabt_invalid          @  8
        .long   __pabt_invalid          @  9
        .long   __pabt_invalid          @  a
        .long   __pabt_invalid          @  b
        .long   __pabt_invalid          @  c
        .long   __pabt_invalid          @  d
        .long   __pabt_invalid          @  e
        .long   __pabt_invalid          @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr               @  0  (USR_26 / USR_32)
        .long   __und_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc               @  3  (SVC_26 / SVC_32)
        .long   __und_invalid           @  4
        .long   __und_invalid           @  5
        .long   __und_invalid           @  6
        .long   __und_invalid           @  7
        .long   __und_invalid           @  8
        .long   __und_invalid           @  9
        .long   __und_invalid           @  a
        .long   __und_invalid           @  b
        .long   __und_invalid           @  c
        .long   __und_invalid           @  d
        .long   __und_invalid           @  e
        .long   __und_invalid           @  f
        .align  5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
        disable_fiq
        subs    pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5

.LCvswi:
        .word   vector_swi

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
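
/*
 * Both the stubs above and the vectors below are copied into the
 * vectors page at run time: the vectors to 0xffff0000 and the stubs
 * to 0xffff0200.  The branches are pc-relative, so stubs_offset is
 * added to each target to keep the displacement correct after the
 * copy.
 */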

        .globl  __vectors_start
__vectors_start:
        swi     SYS_ERROR0
        b       vector_und + stubs_offset
        ldr     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        b       vector_addrexcptn + stubs_offset
        b       vector_irq + stubs_offset
        b       vector_fiq + stubs_offset

        .globl  __vectors_end
__vectors_end:

        .data

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4