Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/kernel/entry-armv.S | |
3 | * | |
4 | * Copyright (C) 1996,1997,1998 Russell King. | |
5 | * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * Low-level vector interface routines | |
12 | * | |
13 | * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes | |
14 | * it to save wrong values... Be aware! | |
15 | */ | |
16 | #include <linux/config.h> | |
1da177e4 | 17 | |
1da177e4 | 18 | #include <asm/glue.h> |
1da177e4 | 19 | #include <asm/vfpmacros.h> |
41e46d6a NP |
20 | #include <asm/hardware.h> /* should be moved into entry-macro.S */ |
21 | #include <asm/arch/irqs.h> /* should be moved into entry-macro.S */ | |
bce495d8 | 22 | #include <asm/arch/entry-macro.S> |
1da177e4 LT |
23 | |
24 | #include "entry-header.S" | |
25 | ||
26 | /* | |
27 | * Invalid mode handlers | |
28 | */ | |
29 | .macro inv_entry, sym, reason | |
30 | sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go | |
31 | stmia sp, {r0 - lr} @ Save XXX r0 - lr | |
32 | ldr r4, .LC\sym | |
33 | mov r1, #\reason | |
34 | .endm | |
35 | ||
36 | __pabt_invalid: | |
37 | inv_entry abt, BAD_PREFETCH | |
38 | b 1f | |
39 | ||
40 | __dabt_invalid: | |
41 | inv_entry abt, BAD_DATA | |
42 | b 1f | |
43 | ||
44 | __irq_invalid: | |
45 | inv_entry irq, BAD_IRQ | |
46 | b 1f | |
47 | ||
48 | __und_invalid: | |
49 | inv_entry und, BAD_UNDEFINSTR | |
50 | ||
51 | 1: zero_fp | |
52 | ldmia r4, {r5 - r7} @ Get XXX pc, cpsr, old_r0 | |
53 | add r4, sp, #S_PC | |
54 | stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0 | |
55 | mov r0, sp | |
56 | and r2, r6, #31 @ int mode | |
57 | b bad_mode | |
58 | ||
59 | /* | |
60 | * SVC mode handlers | |
61 | */ | |
62 | .macro svc_entry, sym | |
63 | sub sp, sp, #S_FRAME_SIZE | |
64 | stmia sp, {r0 - r12} @ save r0 - r12 | |
65 | ldr r2, .LC\sym | |
66 | add r0, sp, #S_FRAME_SIZE | |
67 | ldmia r2, {r2 - r4} @ get pc, cpsr | |
68 | add r5, sp, #S_SP | |
69 | mov r1, lr | |
70 | ||
71 | @ | |
72 | @ We are now ready to fill in the remaining blanks on the stack: | |
73 | @ | |
74 | @ r0 - sp_svc | |
75 | @ r1 - lr_svc | |
76 | @ r2 - lr_<exception>, already fixed up for correct return/restart | |
77 | @ r3 - spsr_<exception> | |
78 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | |
79 | @ | |
80 | stmia r5, {r0 - r4} | |
81 | .endm | |
82 | ||
83 | .align 5 | |
84 | __dabt_svc: | |
85 | svc_entry abt | |
86 | ||
87 | @ | |
88 | @ get ready to re-enable interrupts if appropriate | |
89 | @ | |
90 | mrs r9, cpsr | |
91 | tst r3, #PSR_I_BIT | |
92 | biceq r9, r9, #PSR_I_BIT | |
93 | ||
94 | @ | |
95 | @ Call the processor-specific abort handler: | |
96 | @ | |
97 | @ r2 - aborted context pc | |
98 | @ r3 - aborted context cpsr | |
99 | @ | |
100 | @ The abort handler must return the aborted address in r0, and | |
101 | @ the fault status register in r1. r9 must be preserved. | |
102 | @ | |
103 | #ifdef MULTI_ABORT | |
104 | ldr r4, .LCprocfns | |
105 | mov lr, pc | |
106 | ldr pc, [r4] | |
107 | #else | |
108 | bl CPU_ABORT_HANDLER | |
109 | #endif | |
110 | ||
111 | @ | |
112 | @ set desired IRQ state, then call main handler | |
113 | @ | |
114 | msr cpsr_c, r9 | |
115 | mov r2, sp | |
116 | bl do_DataAbort | |
117 | ||
118 | @ | |
119 | @ IRQs off again before pulling preserved data off the stack | |
120 | @ | |
1ec42c0c | 121 | disable_irq |
1da177e4 LT |
122 | |
123 | @ | |
124 | @ restore SPSR and restart the instruction | |
125 | @ | |
126 | ldr r0, [sp, #S_PSR] | |
127 | msr spsr_cxsf, r0 | |
128 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr | |
129 | ||
130 | .align 5 | |
131 | __irq_svc: | |
132 | svc_entry irq | |
133 | #ifdef CONFIG_PREEMPT | |
134 | get_thread_info r8 | |
135 | ldr r9, [r8, #TI_PREEMPT] @ get preempt count | |
136 | add r7, r9, #1 @ increment it | |
137 | str r7, [r8, #TI_PREEMPT] | |
138 | #endif | |
139 | 1: get_irqnr_and_base r0, r6, r5, lr | |
140 | movne r1, sp | |
141 | @ | |
142 | @ routine called with r0 = irq number, r1 = struct pt_regs * | |
143 | @ | |
144 | adrne lr, 1b | |
145 | bne asm_do_IRQ | |
146 | #ifdef CONFIG_PREEMPT | |
147 | ldr r0, [r8, #TI_FLAGS] @ get flags | |
148 | tst r0, #_TIF_NEED_RESCHED | |
149 | blne svc_preempt | |
150 | preempt_return: | |
151 | ldr r0, [r8, #TI_PREEMPT] @ read preempt value | |
152 | teq r0, r7 | |
153 | str r9, [r8, #TI_PREEMPT] @ restore preempt count | |
154 | strne r0, [r0, -r0] @ bug() | |
155 | #endif | |
156 | ldr r0, [sp, #S_PSR] @ irqs are already disabled | |
157 | msr spsr_cxsf, r0 | |
158 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr | |
159 | ||
160 | .ltorg | |
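
Editorial note: the `@ routine called with r0 = irq number, r1 = struct pt_regs *` comment in the dispatch loop above describes the hand-off to C. The mock below (illustrative user-space C, not kernel code; the fake `pending[]` controller and `get_irqnr` stand in for `get_irqnr_and_base`) sketches that contract, assuming the 2.6-era `asm_do_IRQ(unsigned int, struct pt_regs *)` shape:

```c
/* Illustrative mock of the `1: get_irqnr_and_base ... bne asm_do_IRQ ...
 * adrne lr, 1b` loop: the assembly keeps calling the C handler with the
 * next pending IRQ number and a pointer to the saved registers until the
 * interrupt controller reports no more pending sources. */
#include <stdio.h>

struct pt_regs { unsigned long uregs[18]; };  /* stand-in for the kernel type */

static int pending[] = { 3, 7, -1 };          /* fake controller: IRQs 3 and 7 pending */
static int idx;

static int get_irqnr(void) { return pending[idx++]; }  /* mimics get_irqnr_and_base */

static void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    printf("dispatch IRQ %u with regs at %p\n", irq, (void *)regs);
}

int main(void)
{
    struct pt_regs frame = { { 0 } };
    int irq;
    /* The asm loop branches back to 1b after each handler return and
     * exits when no IRQ is pending (the Z flag stays set). */
    while ((irq = get_irqnr()) >= 0)
        asm_do_IRQ((unsigned int)irq, &frame);
    return 0;
}
```
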
161 | ||
162 | #ifdef CONFIG_PREEMPT | |
163 | svc_preempt: | |
164 | teq r9, #0 @ was preempt count = 0 | |
165 | ldreq r6, .LCirq_stat | |
166 | movne pc, lr @ no | |
167 | ldr r0, [r6, #4] @ local_irq_count | |
168 | ldr r1, [r6, #8] @ local_bh_count | |
169 | adds r0, r0, r1 | |
170 | movne pc, lr | |
171 | mov r7, #0 @ preempt_schedule_irq | |
172 | str r7, [r8, #TI_PREEMPT] @ expects preempt_count == 0 | |
173 | 1: bl preempt_schedule_irq @ irq en/disable is done inside | |
174 | ldr r0, [r8, #TI_FLAGS] @ get new task's TI_FLAGS | |
175 | tst r0, #_TIF_NEED_RESCHED | |
176 | beq preempt_return @ go again | |
177 | b 1b | |
178 | #endif | |
179 | ||
180 | .align 5 | |
181 | __und_svc: | |
182 | svc_entry und | |
183 | ||
184 | @ | |
185 | @ call emulation code, which returns using r9 if it has emulated | |
186 | @ the instruction, or the more conventional lr if we are to treat | |
187 | @ this as a real undefined instruction | |
188 | @ | |
189 | @ r0 - instruction | |
190 | @ | |
191 | ldr r0, [r2, #-4] | |
192 | adr r9, 1f | |
193 | bl call_fpe | |
194 | ||
195 | mov r0, sp @ struct pt_regs *regs | |
196 | bl do_undefinstr | |
197 | ||
198 | @ | |
199 | @ IRQs off again before pulling preserved data off the stack | |
200 | @ | |
1ec42c0c | 201 | 1: disable_irq |
1da177e4 LT |
202 | |
203 | @ | |
204 | @ restore SPSR and restart the instruction | |
205 | @ | |
206 | ldr lr, [sp, #S_PSR] @ Get SVC cpsr | |
207 | msr spsr_cxsf, lr | |
208 | ldmia sp, {r0 - pc}^ @ Restore SVC registers | |
209 | ||
210 | .align 5 | |
211 | __pabt_svc: | |
212 | svc_entry abt | |
213 | ||
214 | @ | |
215 | @ re-enable interrupts if appropriate | |
216 | @ | |
217 | mrs r9, cpsr | |
218 | tst r3, #PSR_I_BIT | |
219 | biceq r9, r9, #PSR_I_BIT | |
220 | msr cpsr_c, r9 | |
221 | ||
222 | @ | |
223 | @ set args, then call main handler | |
224 | @ | |
225 | @ r0 - address of faulting instruction | |
226 | @ r1 - pointer to registers on stack | |
227 | @ | |
228 | mov r0, r2 @ address (pc) | |
229 | mov r1, sp @ regs | |
230 | bl do_PrefetchAbort @ call abort handler | |
231 | ||
232 | @ | |
233 | @ IRQs off again before pulling preserved data off the stack | |
234 | @ | |
1ec42c0c | 235 | disable_irq |
1da177e4 LT |
236 | |
237 | @ | |
238 | @ restore SPSR and restart the instruction | |
239 | @ | |
240 | ldr r0, [sp, #S_PSR] | |
241 | msr spsr_cxsf, r0 | |
242 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr | |
243 | ||
244 | .align 5 | |
245 | .LCirq: | |
246 | .word __temp_irq | |
247 | .LCund: | |
248 | .word __temp_und | |
249 | .LCabt: | |
250 | .word __temp_abt | |
251 | #ifdef MULTI_ABORT | |
252 | .LCprocfns: | |
253 | .word processor | |
254 | #endif | |
255 | .LCfp: | |
256 | .word fp_enter | |
257 | #ifdef CONFIG_PREEMPT | |
258 | .LCirq_stat: | |
259 | .word irq_stat | |
260 | #endif | |
261 | ||
262 | /* | |
263 | * User mode handlers | |
264 | */ | |
265 | .macro usr_entry, sym | |
266 | sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go | |
267 | stmia sp, {r0 - r12} @ save r0 - r12 | |
268 | ldr r7, .LC\sym | |
269 | add r5, sp, #S_PC | |
270 | ldmia r7, {r2 - r4} @ Get USR pc, cpsr | |
271 | ||
2d2669b6 NP |
272 | #if __LINUX_ARM_ARCH__ < 6 |
273 | @ make sure our user space atomic helper is aborted | |
274 | cmp r2, #VIRT_OFFSET | |
275 | bichs r3, r3, #PSR_Z_BIT | |
276 | #endif | |
277 | ||
1da177e4 LT |
278 | @ |
279 | @ We are now ready to fill in the remaining blanks on the stack: | |
280 | @ | |
281 | @ r2 - lr_<exception>, already fixed up for correct return/restart | |
282 | @ r3 - spsr_<exception> | |
283 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | |
284 | @ | |
285 | @ Also, separately save sp_usr and lr_usr | |
286 | @ | |
287 | stmia r5, {r2 - r4} | |
288 | stmdb r5, {sp, lr}^ | |
289 | ||
290 | @ | |
291 | @ Enable the alignment trap while in kernel mode | |
292 | @ | |
293 | alignment_trap r7, r0, __temp_\sym | |
294 | ||
295 | @ | |
296 | @ Clear FP to mark the first stack frame | |
297 | @ | |
298 | zero_fp | |
299 | .endm | |
300 | ||
301 | .align 5 | |
302 | __dabt_usr: | |
303 | usr_entry abt | |
304 | ||
305 | @ | |
306 | @ Call the processor-specific abort handler: | |
307 | @ | |
308 | @ r2 - aborted context pc | |
309 | @ r3 - aborted context cpsr | |
310 | @ | |
311 | @ The abort handler must return the aborted address in r0, and | |
312 | @ the fault status register in r1. | |
313 | @ | |
314 | #ifdef MULTI_ABORT | |
315 | ldr r4, .LCprocfns | |
316 | mov lr, pc | |
317 | ldr pc, [r4] | |
318 | #else | |
319 | bl CPU_ABORT_HANDLER | |
320 | #endif | |
321 | ||
322 | @ | |
323 | @ IRQs on, then call the main handler | |
324 | @ | |
1ec42c0c | 325 | enable_irq |
1da177e4 LT |
326 | mov r2, sp |
327 | adr lr, ret_from_exception | |
328 | b do_DataAbort | |
329 | ||
330 | .align 5 | |
331 | __irq_usr: | |
332 | usr_entry irq | |
333 | ||
334 | #ifdef CONFIG_PREEMPT | |
335 | get_thread_info r8 | |
336 | ldr r9, [r8, #TI_PREEMPT] @ get preempt count | |
337 | add r7, r9, #1 @ increment it | |
338 | str r7, [r8, #TI_PREEMPT] | |
339 | #endif | |
340 | 1: get_irqnr_and_base r0, r6, r5, lr | |
341 | movne r1, sp | |
342 | adrne lr, 1b | |
343 | @ | |
344 | @ routine called with r0 = irq number, r1 = struct pt_regs * | |
345 | @ | |
346 | bne asm_do_IRQ | |
347 | #ifdef CONFIG_PREEMPT | |
348 | ldr r0, [r8, #TI_PREEMPT] | |
349 | teq r0, r7 | |
350 | str r9, [r8, #TI_PREEMPT] | |
351 | strne r0, [r0, -r0] | |
352 | mov tsk, r8 | |
353 | #else | |
354 | get_thread_info tsk | |
355 | #endif | |
356 | mov why, #0 | |
357 | b ret_to_user | |
358 | ||
359 | .ltorg | |
360 | ||
361 | .align 5 | |
362 | __und_usr: | |
363 | usr_entry und | |
364 | ||
365 | tst r3, #PSR_T_BIT @ Thumb mode? | |
366 | bne fpundefinstr @ ignore FP | |
367 | sub r4, r2, #4 | |
368 | ||
369 | @ | |
370 | @ fall through to the emulation code, which returns using r9 if | |
371 | @ it has emulated the instruction, or the more conventional lr | |
372 | @ if we are to treat this as a real undefined instruction | |
373 | @ | |
374 | @ r0 - instruction | |
375 | @ | |
376 | 1: ldrt r0, [r4] | |
377 | adr r9, ret_from_exception | |
378 | adr lr, fpundefinstr | |
379 | @ | |
380 | @ fallthrough to call_fpe | |
381 | @ | |
382 | ||
383 | /* | |
384 | * The out of line fixup for the ldrt above. | |
385 | */ | |
386 | .section .fixup, "ax" | |
387 | 2: mov pc, r9 | |
388 | .previous | |
389 | .section __ex_table,"a" | |
390 | .long 1b, 2b | |
391 | .previous | |
392 | ||
393 | /* | |
394 | * Check whether the instruction is a co-processor instruction. | |
395 | * If yes, we need to call the relevant co-processor handler. | |
396 | * | |
397 | * Note that we don't do a full check here for the co-processor | |
398 | * instructions; all instructions with bit 27 set are well | |
399 | * defined. The only instructions that should fault are the | |
400 | * co-processor instructions. However, we have to watch out | |
401 | * for the ARM6/ARM7 SWI bug. | |
402 | * | |
403 | * Emulators may wish to make use of the following registers: | |
404 | * r0 = instruction opcode. | |
405 | * r2 = PC+4 | |
406 | * r10 = this thread's thread_info structure. | |
407 | */ | |
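
Editorial note: a plain C rendering of the decode `call_fpe` performs below may help. This is an illustrative sketch of the bit tests named in the comment above (bit 27 selects the coprocessor instruction space, bits 8-11 carry the CP number), not kernel code:

```c
/* Illustrative sketch of the call_fpe decode: bit 27 distinguishes
 * CDP/CPRT/LDC/STC, bits 8-11 hold the coprocessor number. */
#include <stdio.h>

static int coproc_number(unsigned int insn)
{
    if (!(insn & 0x08000000))        /* tst r0, #0x08000000 */
        return -1;                   /* not a coprocessor instruction */
    return (insn >> 8) & 0xf;        /* and r8, r0, #0x00000f00 */
}

int main(void)
{
    unsigned int insn = 0xee070f9a;  /* MCR p15, 0, r0, c7, c10, 4 */
    printf("CP#%d\n", coproc_number(insn));  /* prints CP#15 */
    return 0;
}
```
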
408 | call_fpe: | |
409 | tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 | |
410 | #if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) | |
411 | and r8, r0, #0x0f000000 @ mask out op-code bits | |
412 | teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)? | |
413 | #endif | |
414 | moveq pc, lr | |
415 | get_thread_info r10 @ get current thread | |
416 | and r8, r0, #0x00000f00 @ mask out CP number | |
417 | mov r7, #1 | |
418 | add r6, r10, #TI_USED_CP | |
419 | strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[] | |
420 | #ifdef CONFIG_IWMMXT | |
421 | @ Test if we need to give access to iWMMXt coprocessors | |
422 | ldr r5, [r10, #TI_FLAGS] | |
423 | rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only | |
424 | movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) | |
425 | bcs iwmmxt_task_enable | |
426 | #endif | |
1ec42c0c | 427 | enable_irq |
1da177e4 LT |
428 | add pc, pc, r8, lsr #6 @ r8 = CP# << 8, so this indexes the table below by CP# * 4 (pc reads as . + 8)
429 | mov r0, r0 @ nop: pads so the table starts at the address pc yields | |
430 | ||
431 | mov pc, lr @ CP#0 | |
432 | b do_fpe @ CP#1 (FPE) | |
433 | b do_fpe @ CP#2 (FPE) | |
434 | mov pc, lr @ CP#3 | |
435 | mov pc, lr @ CP#4 | |
436 | mov pc, lr @ CP#5 | |
437 | mov pc, lr @ CP#6 | |
438 | mov pc, lr @ CP#7 | |
439 | mov pc, lr @ CP#8 | |
440 | mov pc, lr @ CP#9 | |
441 | #ifdef CONFIG_VFP | |
442 | b do_vfp @ CP#10 (VFP) | |
443 | b do_vfp @ CP#11 (VFP) | |
444 | #else | |
445 | mov pc, lr @ CP#10 (VFP) | |
446 | mov pc, lr @ CP#11 (VFP) | |
447 | #endif | |
448 | mov pc, lr @ CP#12 | |
449 | mov pc, lr @ CP#13 | |
450 | mov pc, lr @ CP#14 (Debug) | |
451 | mov pc, lr @ CP#15 (Control) | |
452 | ||
453 | do_fpe: | |
454 | ldr r4, .LCfp | |
455 | add r10, r10, #TI_FPSTATE @ r10 = workspace | |
456 | ldr pc, [r4] @ Call FP module USR entry point | |
457 | ||
458 | /* | |
459 | * The FP module is called with these registers set: | |
460 | * r0 = instruction | |
461 | * r2 = PC+4 | |
462 | * r9 = normal "successful" return address | |
463 | * r10 = FP workspace | |
464 | * lr = unrecognised FP instruction return address | |
465 | */ | |
466 | ||
467 | .data | |
468 | ENTRY(fp_enter) | |
469 | .word fpundefinstr | |
470 | .text | |
471 | ||
472 | fpundefinstr: | |
473 | mov r0, sp | |
474 | adr lr, ret_from_exception | |
475 | b do_undefinstr | |
476 | ||
477 | .align 5 | |
478 | __pabt_usr: | |
479 | usr_entry abt | |
480 | ||
1ec42c0c | 481 | enable_irq @ Enable interrupts |
1da177e4 LT |
482 | mov r0, r2 @ address (pc) |
483 | mov r1, sp @ regs | |
484 | bl do_PrefetchAbort @ call abort handler | |
485 | /* fall through */ | |
486 | /* | |
487 | * This is the return code to user mode for abort handlers | |
488 | */ | |
489 | ENTRY(ret_from_exception) | |
490 | get_thread_info tsk | |
491 | mov why, #0 | |
492 | b ret_to_user | |
493 | ||
494 | /* | |
495 | * Register switch for ARMv3 and ARMv4 processors | |
496 | * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info | |
497 | * previous and next are guaranteed not to be the same. | |
498 | */ | |
499 | ENTRY(__switch_to) | |
500 | add ip, r1, #TI_CPU_SAVE | |
501 | ldr r3, [r2, #TI_TP_VALUE] | |
502 | stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack | |
503 | ldr r6, [r2, #TI_CPU_DOMAIN]! | |
504 | #if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT) | |
505 | mra r4, r5, acc0 | |
506 | stmia ip, {r4, r5} | |
507 | #endif | |
4b0e07a5 | 508 | #if defined(CONFIG_HAS_TLS_REG) |
2d2669b6 | 509 | mcr p15, 0, r3, c13, c0, 3 @ set TLS register |
4b0e07a5 | 510 | #elif !defined(CONFIG_TLS_REG_EMUL) |
1da177e4 | 511 | mov r4, #0xffff0fff |
2d2669b6 NP |
512 | str r3, [r4, #-15] @ TLS val at 0xffff0ff0 |
513 | #endif | |
1da177e4 LT |
514 | mcr p15, 0, r6, c3, c0, 0 @ Set domain register |
515 | #ifdef CONFIG_VFP | |
516 | @ Always disable VFP so we can lazily save/restore the old | |
517 | @ state. This occurs in the context of the previous thread. | |
518 | VFPFMRX r4, FPEXC | |
519 | bic r4, r4, #FPEXC_ENABLE | |
520 | VFPFMXR FPEXC, r4 | |
521 | #endif | |
522 | #if defined(CONFIG_IWMMXT) | |
523 | bl iwmmxt_task_switch | |
524 | #elif defined(CONFIG_CPU_XSCALE) | |
525 | add r4, r2, #40 @ cpu_context_save->extra | |
526 | ldmib r4, {r4, r5} | |
527 | mar acc0, r4, r5 | |
528 | #endif | |
529 | ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously | |
530 | ||
531 | __INIT | |
2d2669b6 NP |
532 | |
533 | /* | |
534 | * User helpers. | |
535 | * | |
536 | * These are segments of kernel-provided user code reachable from user space | |
537 | * at a fixed address in kernel memory. They provide user space with some | |
538 | * operations which require kernel help because of unimplemented native | |
539 | * features and/or instructions in many ARM CPUs. The idea is for this code | |
540 | * to be executed directly in user mode for best efficiency, but it is too | |
541 | * intimate with its kernel counterpart to be left to user | |
542 | * libraries. In fact this code might even differ from one CPU to another | |
543 | * depending on the available instruction set and restrictions like on | |
544 | * SMP systems. In other words, the kernel reserves the right to change | |
545 | * this code as needed without warning. Only the entry points and their | |
546 | * results are guaranteed to be stable. | |
547 | * | |
548 | * Each segment is 32-byte aligned and will be moved to the top of the high | |
549 | * vector page. New segments (if ever needed) must be added in front of | |
550 | * existing ones. This mechanism should be used only for things that are | |
551 | * really small and justified, and not be abused freely. | |
552 | * | |
553 | * User space is expected to implement these things inline when optimizing | |
554 | * for a processor that has the necessary native support, but only if the | |
555 | * resulting binaries are already going to be incompatible with earlier ARM | |
556 | * processors due to the use of unsupported instructions other than what | |
557 | * is provided here. In other words, don't make binaries unable to run on | |
558 | * earlier processors just for the sake of not using these kernel helpers | |
559 | * if your compiled code is not going to use the new instructions for other | |
560 | * purposes. | |
561 | */ | |
562 | ||
563 | .align 5 | |
564 | .globl __kuser_helper_start | |
565 | __kuser_helper_start: | |
566 | ||
567 | /* | |
568 | * Reference prototype: | |
569 | * | |
570 | * int __kernel_cmpxchg(int oldval, int newval, int *ptr) | |
571 | * | |
572 | * Input: | |
573 | * | |
574 | * r0 = oldval | |
575 | * r1 = newval | |
576 | * r2 = ptr | |
577 | * lr = return address | |
578 | * | |
579 | * Output: | |
580 | * | |
581 | * r0 = returned value (zero or non-zero) | |
582 | * C flag = set if r0 == 0, clear if r0 != 0 | |
583 | * | |
584 | * Clobbered: | |
585 | * | |
586 | * r3, ip, flags | |
587 | * | |
588 | * Definition and user space usage example: | |
589 | * | |
590 | * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); | |
591 | * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) | |
592 | * | |
593 | * Atomically store newval in *ptr if *ptr is equal to oldval for user space. | |
594 | * Return zero if *ptr was changed or non-zero if no exchange happened. | |
595 | * The C flag is also set if *ptr was changed to allow for assembly | |
596 | * optimization in the calling code. | |
597 | * | |
598 | * For example, a user space atomic_add implementation could look like this: | |
599 | * | |
600 | * #define atomic_add(ptr, val) \ | |
601 | * ({ register unsigned int *__ptr asm("r2") = (ptr); \ | |
602 | * register unsigned int __result asm("r1"); \ | |
603 | * asm volatile ( \ | |
604 | * "1: @ atomic_add\n\t" \ | |
605 | * "ldr r0, [r2]\n\t" \ | |
606 | * "mov r3, #0xffff0fff\n\t" \ | |
607 | * "add lr, pc, #4\n\t" \ | |
608 | * "add r1, r0, %2\n\t" \ | |
609 | * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ | |
610 | * "bcc 1b" \ | |
611 | * : "=&r" (__result) \ | |
612 | * : "r" (__ptr), "rIL" (val) \ | |
613 | * : "r0","r3","ip","lr","cc","memory" ); \ | |
614 | * __result; }) | |
615 | */ | |
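
Editorial note: complementing the assembly `atomic_add` example above, a C-level use of the helper through the documented function pointer might look like the sketch below. The address 0xffff0fc0 and the return convention come from the comment block; `atomic_increment` is a made-up name for this example, and the code only works on ARM Linux kernels that provide the helper:

```c
/* Illustrative user-space sketch built on the documented definition above. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Retry until our oldval snapshot is still current when the store happens;
 * a non-zero return means another thread or an exception intervened. */
static int atomic_increment(int *ptr)   /* hypothetical helper name */
{
    int old;
    do {
        old = *ptr;
    } while (__kernel_cmpxchg(old, old + 1, ptr) != 0);
    return old + 1;
}
```
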
616 | ||
617 | __kuser_cmpxchg: @ 0xffff0fc0 | |
618 | ||
619 | #if __LINUX_ARM_ARCH__ < 6 | |
620 | ||
621 | #ifdef CONFIG_SMP /* sanity check */ | |
622 | #error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?" | |
623 | #endif | |
624 | ||
625 | /* | |
626 | * Theory of operation: | |
627 | * | |
628 | * We set the Z flag before loading oldval. If ever an exception | |
629 | * occurs we can not be sure the loaded value will still be the same | |
630 | * when the exception returns, therefore the user exception handler | |
631 | * will clear the Z flag whenever the interrupted user code was | |
632 | * actually from the kernel address space (see the usr_entry macro). | |
633 | * | |
634 | * The post-increment on the str is used to prevent a race with an | |
635 | * exception happening just after the str instruction which would | |
636 | * clear the Z flag although the exchange was done. | |
637 | */ | |
638 | teq ip, ip @ set Z flag | |
639 | ldr ip, [r2] @ load current val | |
640 | add r3, r2, #1 @ prepare store ptr | |
641 | teqeq ip, r0 @ compare with oldval if still allowed | |
642 | streq r1, [r3, #-1]! @ store newval if still allowed | |
643 | subs r0, r2, r3 @ if r2 == r3 the str occurred | |
644 | mov pc, lr | |
645 | ||
646 | #else | |
647 | ||
648 | ldrex r3, [r2] | |
649 | subs r3, r3, r0 | |
650 | strexeq r3, r1, [r2] | |
651 | rsbs r0, r3, #0 | |
652 | mov pc, lr | |
653 | ||
654 | #endif | |
655 | ||
656 | .align 5 | |
657 | ||
658 | /* | |
659 | * Reference prototype: | |
660 | * | |
661 | * int __kernel_get_tls(void) | |
662 | * | |
663 | * Input: | |
664 | * | |
665 | * lr = return address | |
666 | * | |
667 | * Output: | |
668 | * | |
669 | * r0 = TLS value | |
670 | * | |
671 | * Clobbered: | |
672 | * | |
673 | * the Z flag might be lost | |
674 | * | |
675 | * Definition and user space usage example: | |
676 | * | |
677 | * typedef int (__kernel_get_tls_t)(void); | |
678 | * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) | |
679 | * | |
680 | * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. | |
681 | * | |
682 | * This could be used as follows: | |
683 | * | |
684 | * #define __kernel_get_tls() \ | |
685 | * ({ register unsigned int __val asm("r0"); \ | |
686 | * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ | |
687 | * : "=r" (__val) : : "lr","cc" ); \ | |
688 | * __val; }) | |
689 | */ | |
690 | ||
691 | __kuser_get_tls: @ 0xffff0fe0 | |
692 | ||
4b0e07a5 | 693 | #if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) |
2d2669b6 NP |
694 | |
695 | ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 | |
696 | mov pc, lr | |
697 | ||
698 | #else | |
699 | ||
700 | mrc p15, 0, r0, c13, c0, 3 @ read TLS register | |
701 | mov pc, lr | |
702 | ||
703 | #endif | |
704 | ||
705 | .rep 5 | |
706 | .word 0 @ pad up to __kuser_helper_version | |
707 | .endr | |
708 | ||
709 | /* | |
710 | * Reference declaration: | |
711 | * | |
712 | * extern unsigned int __kernel_helper_version; | |
713 | * | |
714 | * Definition and user space usage example: | |
715 | * | |
716 | * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) | |
717 | * | |
718 | * User space may read this to determine the current number of helpers | |
719 | * available. | |
720 | */ | |
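
Editorial note: given the definition above, a defensive caller can gate helper use on this word. A small sketch follows; the threshold 2 reflects the layout in this file, where each helper occupies a 32-byte slot counted down from the top of the vector page, so `__kernel_cmpxchg` at 0xffff0fc0 is present once the version reaches 2:

```c
/* Illustrative user-space guard based on the definition above. */
#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

static int have_kernel_cmpxchg(void)
{
    return __kernel_helper_version >= 2;
}
```
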
721 | ||
722 | __kuser_helper_version: @ 0xffff0ffc | |
723 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) | |
724 | ||
725 | .globl __kuser_helper_end | |
726 | __kuser_helper_end: | |
727 | ||
728 | ||
1da177e4 LT |
729 | /* |
730 | * Vector stubs. | |
731 | * | |
7933523d RK |
732 | * This code is copied to 0xffff0200 so we can use branches in the |
733 | * vectors, rather than ldr's. Note that this code must not | |
734 | * exceed 0x300 bytes. | |
1da177e4 LT |
735 | * |
736 | * Common stub entry macro: | |
737 | * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | |
738 | */ | |
739 | .macro vector_stub, name, sym, correction=0 | |
740 | .align 5 | |
741 | ||
742 | vector_\name: | |
743 | ldr r13, .LCs\sym | |
744 | .if \correction | |
745 | sub lr, lr, #\correction | |
746 | .endif | |
747 | str lr, [r13] @ save lr_IRQ | |
748 | mrs lr, spsr | |
749 | str lr, [r13, #4] @ save spsr_IRQ | |
750 | @ | |
751 | @ now branch to the relevant MODE handling routine | |
752 | @ | |
753 | mrs r13, cpsr | |
754 | bic r13, r13, #MODE_MASK | |
acaca3c9 | 755 | orr r13, r13, #SVC_MODE |
1da177e4 LT |
756 | msr spsr_cxsf, r13 @ switch to SVC_32 mode |
757 | ||
758 | and lr, lr, #15 | |
759 | ldr lr, [pc, lr, lsl #2] | |
760 | movs pc, lr @ Changes mode and branches | |
761 | .endm | |
762 | ||
7933523d | 763 | .globl __stubs_start |
1da177e4 LT |
764 | __stubs_start: |
765 | /* | |
766 | * Interrupt dispatcher | |
767 | */ | |
768 | vector_stub irq, irq, 4 | |
769 | ||
770 | .long __irq_usr @ 0 (USR_26 / USR_32) | |
771 | .long __irq_invalid @ 1 (FIQ_26 / FIQ_32) | |
772 | .long __irq_invalid @ 2 (IRQ_26 / IRQ_32) | |
773 | .long __irq_svc @ 3 (SVC_26 / SVC_32) | |
774 | .long __irq_invalid @ 4 | |
775 | .long __irq_invalid @ 5 | |
776 | .long __irq_invalid @ 6 | |
777 | .long __irq_invalid @ 7 | |
778 | .long __irq_invalid @ 8 | |
779 | .long __irq_invalid @ 9 | |
780 | .long __irq_invalid @ a | |
781 | .long __irq_invalid @ b | |
782 | .long __irq_invalid @ c | |
783 | .long __irq_invalid @ d | |
784 | .long __irq_invalid @ e | |
785 | .long __irq_invalid @ f | |
786 | ||
787 | /* | |
788 | * Data abort dispatcher | |
789 | * Enter in ABT mode, spsr = USR CPSR, lr = USR PC | |
790 | */ | |
791 | vector_stub dabt, abt, 8 | |
792 | ||
793 | .long __dabt_usr @ 0 (USR_26 / USR_32) | |
794 | .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32) | |
795 | .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32) | |
796 | .long __dabt_svc @ 3 (SVC_26 / SVC_32) | |
797 | .long __dabt_invalid @ 4 | |
798 | .long __dabt_invalid @ 5 | |
799 | .long __dabt_invalid @ 6 | |
800 | .long __dabt_invalid @ 7 | |
801 | .long __dabt_invalid @ 8 | |
802 | .long __dabt_invalid @ 9 | |
803 | .long __dabt_invalid @ a | |
804 | .long __dabt_invalid @ b | |
805 | .long __dabt_invalid @ c | |
806 | .long __dabt_invalid @ d | |
807 | .long __dabt_invalid @ e | |
808 | .long __dabt_invalid @ f | |
809 | ||
810 | /* | |
811 | * Prefetch abort dispatcher | |
812 | * Enter in ABT mode, spsr = USR CPSR, lr = USR PC | |
813 | */ | |
814 | vector_stub pabt, abt, 4 | |
815 | ||
816 | .long __pabt_usr @ 0 (USR_26 / USR_32) | |
817 | .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32) | |
818 | .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32) | |
819 | .long __pabt_svc @ 3 (SVC_26 / SVC_32) | |
820 | .long __pabt_invalid @ 4 | |
821 | .long __pabt_invalid @ 5 | |
822 | .long __pabt_invalid @ 6 | |
823 | .long __pabt_invalid @ 7 | |
824 | .long __pabt_invalid @ 8 | |
825 | .long __pabt_invalid @ 9 | |
826 | .long __pabt_invalid @ a | |
827 | .long __pabt_invalid @ b | |
828 | .long __pabt_invalid @ c | |
829 | .long __pabt_invalid @ d | |
830 | .long __pabt_invalid @ e | |
831 | .long __pabt_invalid @ f | |
832 | ||
833 | /* | |
834 | * Undef instr entry dispatcher | |
835 | * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | |
836 | */ | |
837 | vector_stub und, und | |
838 | ||
839 | .long __und_usr @ 0 (USR_26 / USR_32) | |
840 | .long __und_invalid @ 1 (FIQ_26 / FIQ_32) | |
841 | .long __und_invalid @ 2 (IRQ_26 / IRQ_32) | |
842 | .long __und_svc @ 3 (SVC_26 / SVC_32) | |
843 | .long __und_invalid @ 4 | |
844 | .long __und_invalid @ 5 | |
845 | .long __und_invalid @ 6 | |
846 | .long __und_invalid @ 7 | |
847 | .long __und_invalid @ 8 | |
848 | .long __und_invalid @ 9 | |
849 | .long __und_invalid @ a | |
850 | .long __und_invalid @ b | |
851 | .long __und_invalid @ c | |
852 | .long __und_invalid @ d | |
853 | .long __und_invalid @ e | |
854 | .long __und_invalid @ f | |
855 | ||
856 | .align 5 | |
857 | ||
858 | /*============================================================================= | |
859 | * Undefined FIQs | |
860 | *----------------------------------------------------------------------------- | |
861 | * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC | |
862 | * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg. | |
863 | * Basically to switch modes, we *HAVE* to clobber one register... brain | |
864 | * damage alert! I don't think that we can execute any code in here in any | |
865 | * other mode than FIQ... Ok you can switch to another mode, but you can't | |
866 | * get out of that mode without clobbering one register. | |
867 | */ | |
868 | vector_fiq: | |
869 | disable_fiq | |
870 | subs pc, lr, #4 | |
871 | ||
872 | /*============================================================================= | |
873 | * Address exception handler | |
874 | *----------------------------------------------------------------------------- | |
875 | * These aren't too critical. | |
876 | * (they're not supposed to happen, and won't happen in 32-bit data mode). | |
877 | */ | |
878 | ||
879 | vector_addrexcptn: | |
880 | b vector_addrexcptn | |
881 | ||
882 | /* | |
883 | * We group all the following data together to optimise | |
884 | * for CPUs with separate I & D caches. | |
885 | */ | |
886 | .align 5 | |
887 | ||
888 | .LCvswi: | |
889 | .word vector_swi | |
890 | ||
891 | .LCsirq: | |
892 | .word __temp_irq | |
893 | .LCsund: | |
894 | .word __temp_und | |
895 | .LCsabt: | |
896 | .word __temp_abt | |
897 | ||
7933523d | 898 | .globl __stubs_end |
1da177e4 LT |
899 | __stubs_end: |
900 | ||
7933523d | 901 | .equ stubs_offset, __vectors_start + 0x200 - __stubs_start |
1da177e4 | 902 | |
7933523d RK |
903 | .globl __vectors_start |
904 | __vectors_start: | |
1da177e4 | 905 | swi SYS_ERROR0 |
7933523d RK |
906 | b vector_und + stubs_offset |
907 | ldr pc, .LCvswi + stubs_offset | |
908 | b vector_pabt + stubs_offset | |
909 | b vector_dabt + stubs_offset | |
910 | b vector_addrexcptn + stubs_offset | |
911 | b vector_irq + stubs_offset | |
912 | b vector_fiq + stubs_offset | |
913 | ||
914 | .globl __vectors_end | |
915 | __vectors_end: | |
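
Editorial note: the `stubs_offset` trick above deserves a word. ARM branches encode a PC-relative displacement, so a `b` assembled in the vector table but destined to run from the copied vector page needs its link-time target biased by the distance between where the stubs are linked and where they will execute (0x200 into the vector page, per the stub comment earlier). The check below uses made-up link addresses; only the 0x200 copy offset comes from this file:

```c
/* Illustrative check that `b vector_und + stubs_offset` encodes the right
 * PC-relative displacement once both blocks are copied to the vector page:
 * the vectors run at the page base, the stubs 0x200 above it. */
#include <assert.h>

int main(void)
{
    unsigned long vectors_start = 0xc0008000;           /* assumed link address */
    unsigned long stubs_start   = 0xc0008400;           /* assumed link address */
    unsigned long vector_und    = stubs_start + 0x40;   /* assumed stub position */
    unsigned long stubs_offset  = vectors_start + 0x200 - stubs_start;

    unsigned long branch_site   = vectors_start + 0x4;  /* the und vector slot */
    unsigned long encoded_disp  = (vector_und + stubs_offset) - branch_site;

    unsigned long run_vectors   = 0xffff0000;           /* high vector page */
    unsigned long run_site      = run_vectors + 0x4;
    unsigned long run_target    = run_vectors + 0x200 + (vector_und - stubs_start);

    /* The displacement fixed at link time equals the one needed at run
     * time, so the copied branch lands on the copied stub. */
    assert(encoded_disp == run_target - run_site);
    return 0;
}
```
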
1da177e4 LT |
916 | |
917 | .data | |
918 | ||
919 | /* | |
920 | * Do not reorder these, and do not insert extra data between... | |
921 | */ | |
922 | ||
923 | __temp_irq: | |
924 | .word 0 @ saved lr_irq | |
925 | .word 0 @ saved spsr_irq | |
926 | .word -1 @ old_r0 | |
927 | __temp_und: | |
928 | .word 0 @ Saved lr_und | |
929 | .word 0 @ Saved spsr_und | |
930 | .word -1 @ old_r0 | |
931 | __temp_abt: | |
932 | .word 0 @ Saved lr_abt | |
933 | .word 0 @ Saved spsr_abt | |
934 | .word -1 @ old_r0 | |
935 | ||
936 | .globl cr_alignment | |
937 | .globl cr_no_alignment | |
938 | cr_alignment: | |
939 | .space 4 | |
940 | cr_no_alignment: | |
941 | .space 4 |