Commit | Line | Data |
---|---|---|
bce495d8 | 1 | #include <linux/init.h> |
1da177e4 LT |
2 | #include <linux/linkage.h> |
3 | ||
4 | #include <asm/assembler.h> | |
e6ae744d | 5 | #include <asm/asm-offsets.h> |
1da177e4 | 6 | #include <asm/errno.h> |
bce495d8 | 7 | #include <asm/thread_info.h> |
19c4d593 | 8 | #include <asm/v7m.h> |
1da177e4 LT |
9 | |
@ Bad Abort numbers
@ -----------------
@ Index values passed to the bad-mode/bad-abort handlers to identify
@ which kind of unrecoverable exception was taken.
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	@ Clear the frame pointer so stack backtraces terminate here.
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	@ Re-apply the saved CP15 control register value (restores the
	@ alignment-trap configuration).  \rtemp is clobbered.
	@ NOTE(review): .LCcralign is a literal-pool entry defined elsewhere
	@ in this file (not visible here); presumably it points at the
	@ cached cr_alignment value — confirm against the rest of the file.
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign		@ \rtemp = &saved control reg value
	ldr	\rtemp, [\rtemp]		@ \rtemp = saved value
	mcr	p15, 0, \rtemp, c1, c0		@ write CP15 c1 (control register)
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 words) before
 * switching to the exception stack (SP_main).
 *
 * If exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp			@ frame is on the process stack
	moveq	r12, sp				@ frame is on the main stack

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	@ make room for the rest of struct pt_regs and save r0-r11 below it
	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}		@ r8=r12, r10=lr, r11=pc, r12=xPSR

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4			@ skip the alignment padding word

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

/*
 * PENDSV and SVCALL are configured to have the same exception
 * priorities. As a kernel thread runs at SVCALL execution priority it
 * can never be preempted and so we will never have to return to a
 * kernel thread here.
 *
 * \ret_r0 selects whether the (possibly updated) r0 currently in the
 * register, rather than the r0 slot on the stack, is placed in the
 * rebuilt exception frame (the "fast" return path).
 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i				@ no irqs while rebuilding the frame
	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}			@ r1=r12, r2=sp, r3=lr, r4=pc, r5=xPSR

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4			@ align frame; hardware re-adds the word
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}		@ r12, lr, pc, xPSR
	ldmia	sp, {r1, r3-r5}			@ reload saved r0 slot (r1) and r3-r5... NOTE(review): r1 here holds the stacked r0 — verify against pt_regs layout
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}		@ use live r0 (syscall return value)
	.else
	stmdb	r2!, {r1, r3-r5}		@ use stacked r0
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr				@ exception return via EXC_RETURN in lr
	.endm
#endif	/* CONFIG_CPU_V7M */

@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
	@ Save sp_usr/lr_usr to [\rd, #\offset] / [\rd, #\offset + 4].
	@ \rtemp is clobbered; cpsr mode bits are toggled SVC<->SYS and back.
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	@ Counterpart of store_user_sp_lr: reload sp_usr/lr_usr from
	@ [\rd, #\offset] / [\rd, #\offset + 4] via a SYS-mode round trip.
	@ \rtemp is clobbered.  SVC mode only.
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

#ifndef CONFIG_THUMB2_KERNEL
	@ Return from an exception taken in SVC mode: restore spsr and
	@ r0-pc from the pt_regs frame at sp.  \rpsr holds the saved psr;
	@ \irq != 0 means the caller guarantees IRQs are already off.
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
	@ V6 has no clrex: fake it with a strex to the frame we are
	@ about to discard, preserving the r0 slot via the reload below.
	ldr	r0, [sp]
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ pre-v6 CPUs have no exclusive monitor to clear
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm

	@ Return to user space from the pt_regs frame at sp + \offset.
	@ \fast skips reloading r0 (it already holds the syscall return
	@ value on the fast path).
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc (and point sp at S_PC)
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor (no clrex on V6)
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

	@ \rd = current thread_info: round sp down to an 8 KB boundary by
	@ shifting out the low 13 bits.
	@ NOTE(review): the hard-coded #13 implies an 8 KB kernel stack with
	@ thread_info at its base — confirm against THREAD_SIZE.
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm

@
@ 32-bit wide "mov pc, reg"
@
	@ ARM encoding is always 32-bit, so a plain mov suffices here.
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm
#else	/* CONFIG_THUMB2_KERNEL */

	@ Thumb-2 SVC-mode exception return.  ldm {..pc}^ is unavailable,
	@ so build an rfe context (pc, psr) just above the saved frame and
	@ return with rfeia.  \irq != 0 means IRQs are already off.
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
	clrex					@ clear the exclusive monitor
	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr				@ sp -> saved lr + rfe context
	ldr	lr, [sp], #4			@ restore calling lr
	rfeia	sp!				@ return: load pc and cpsr
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * Note we don't need to do clrex here as clearing the local monitor is
 * part of each exception entry and exit sequence.
 */
	@ V7M: drop \offset bytes of frame, then take the common
	@ exception-return path; \fast selects the live-r0 return.
	.macro	restore_user_regs, fast = 0, offset = 0
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
	.endm
#else	/* ifdef CONFIG_CPU_V7M */

	@ Thumb-2 return to user space: ldm {}^ is unavailable, so restore
	@ sp_usr/lr_usr via load_user_sp_lr and the rest with a plain ldmdb.
	@ \fast skips r0 (it holds the syscall return value).
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
#endif	/* ifdef CONFIG_CPU_V7M / else */

	@ \rd = current thread_info (Thumb-2 form: no inline-shift mov, so
	@ the lsr is a separate instruction).
	@ NOTE(review): the hard-coded #13 implies an 8 KB kernel stack with
	@ thread_info at its base — confirm against THREAD_SIZE.
	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm

@
@ 32-bit wide "mov pc, reg"
@
	@ In Thumb-2 "mov pc, reg" is a 16-bit encoding; pad with a nop so
	@ the sequence occupies a full 32 bits.
	.macro	movw_pc, reg
	mov	pc, \reg
	nop
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	@ Notify context tracking that we left user mode.  \save = 1
	@ preserves the caller-clobbered registers around the C call.
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	@ Notify context tracking that we are about to enter user mode.
	@ \save = 1 preserves the caller-clobbered registers around the C call.
	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional: both alias r8 and are never
 * needed at the same time.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info