ARM: entry: avoid enabling interrupts in prefetch/data abort handlers
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	mov	r0, sp
	ldr	r5, [r5]
	adr	lr, BSYM(9997f)
	teq	r5, #0
	movne	pc, r5
#endif
	arch_irq_handler_default
9997:
	.endm
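/*
 * With CONFIG_MULTI_IRQ_HANDLER the platform installs its dispatcher
 * in the handle_arch_irq pointer (see the .data definition at the end
 * of this file); until one is registered, the macro above falls
 * through to arch_irq_handler_default from entry-macro-multi.S.
 */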
	.macro	pabt_helper
	@ PABORT handler takes fault address in r4
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper
	mov	r2, r4
	mov	r3, r5

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception
@		   (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
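/*
 * The SPFIX() instructions below keep the SVC stack 64-bit aligned on
 * EABI kernels (the AAPCS requires an 8-byte aligned stack).
 * svc_entry tests the original alignment of sp and drops one extra
 * word when needed; that extra word is accounted for again when the
 * saved sp_svc value is computed.
 */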
	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}
	.endm
	.align	5
__dabt_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	dabt_helper

	@
	@ call main handler
	@
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)
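/*
 * Note: the handler above now runs entirely with IRQs disabled rather
 * than re-enabling them around do_DataAbort; the tst/bleq/blne
 * sequence only brings the IRQ tracer in line with the PSR that
 * svc_exit is about to restore.
 */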
	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg
#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif
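/*
 * preempt_schedule_irq() enables and disables IRQs itself; the loop
 * above re-reads TI_FLAGS afterwards because an interrupt taken while
 * rescheduling may have set _TIF_NEED_RESCHED again.
 */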
	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)
	.align	5
__pabt_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	pabt_helper
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)
	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * should be too (the #if below enforces this).
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm
	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm
	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	dabt_helper

	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)
	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif

	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg
	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)
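/*
 * The ldr*t loads above use the translated (user) access variants so
 * that fetching the faulted instruction honours user permissions; the
 * exception table entries below catch the case where the page was
 * unmapped again in the meantime.
 */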
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
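/*
 * The "add pc, pc, r8, lsr #6" above indexes the per-coprocessor
 * branch table that follows: r8 holds the CP number shifted left by
 * 8, so lsr #6 turns it into a word offset, and reading pc as
 * "current instruction + 8" in ARM mode makes CP#0 land exactly on
 * the first movw_pc entry after the nop.
 */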
#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)
	.align	5
__pabt_usr:
	usr_entry
	pabt_helper
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
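/*
 * With CONFIG_CC_STACKPROTECTOR on UP there is a single global
 * __stack_chk_guard: the next task's canary is loaded first and only
 * written out after atomic_notifier_call_chain() returns, so that C
 * code called in between still checks against the value it saw in
 * its prologue.
 */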
	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user
 * space at a fixed address in kernel memory.  They provide user space
 * with operations which require kernel help because many ARM CPUs lack
 * the necessary native features and/or instructions.  The idea is for
 * this code to be executed directly in user mode for best efficiency,
 * while being too intimate with the kernel counterpart to be left to
 * user libraries.  In fact this code might even differ from one CPU to
 * another depending on the available instruction set and restrictions
 * such as those on SMP systems.  In other words, the kernel reserves
 * the right to change this code as needed without warning.  Only the
 * entry points and their results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the
 * high vector page.  New segments (if ever needed) must be added in
 * front of existing ones.  This mechanism should be used only for
 * things that are really small and justified, and not be abused freely.
 *
 * User space is expected to implement these things inline when
 * optimizing for a processor that has the necessary native support,
 * but only if the resulting binaries are already going to be
 * incompatible with earlier ARM processors due to the use of
 * unsupported instructions other than what is provided here.  In other
 * words, don't make binaries unable to run on earlier processors just
 * for the sake of not using these kernel helpers if your compiled code
 * is not going to use the new instructions for other purposes.
 */
 THUMB(	.arm	)
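/*
 * The usr_ret macro below returns to the caller's instruction set:
 * bx can interwork back to Thumb when CONFIG_ARM_THUMB is set, while
 * a plain "mov pc" always stays in ARM state.
 */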
	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:
/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *		: : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5
/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif
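/*
 * Note on the ARMv6+ path above: strexeq writes 0 to r3 on success
 * and 1 on failure, and is only attempted when the comparison
 * matched; teqeq/beq therefore retries only a cmpxchg whose
 * exclusive store was actually lost to contention.
 */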
	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
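/*
 * A minimal usage sketch (the version to test against depends on
 * which helpers your code needs; each 32-byte slot counts as one):
 *
 *	if (__kernel_helper_version < 2)
 *		use_syscall_fallback();		(cmpxchg needs version 2)
 *	else
 *		__kernel_cmpxchg(old, new, &word);
 */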
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
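/*
 * The correction parameter accounts for how far lr_<exception> points
 * past the faulting/interrupted instruction in each mode: 4 for IRQ
 * and prefetch abort, 8 for data abort, and 0 for undefined
 * instruction, where lr already points at the next instruction.
 */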
	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi
	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:
	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif