Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/vfp/vfphw.S | |
3 | * | |
4 | * Copyright (C) 2004 ARM Limited. | |
5 | * Written by Deep Blue Solutions Limited. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This code is called from the kernel's undefined instruction trap. | |
12 | * r9 holds the return address for successful handling. | |
13 | * lr holds the return address for unrecognised instructions. | |
14 | * r10 points at the start of the private FP workspace in the thread structure | |
15 | * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) | |
16 | */ | |
17 | #include <asm/thread_info.h> | |
18 | #include <asm/vfpmacros.h> | |
0cc41e4a | 19 | #include <linux/kern_levels.h> |
1da177e4 LT |
20 | #include "../kernel/entry-header.S" |
21 | ||
22 | .macro DBGSTR, str | |
23 | #ifdef DEBUG | |
24 | stmfd sp!, {r0-r3, ip, lr} | |
ded3ef0f | 25 | ldr r0, =1f |
1da177e4 | 26 | bl printk |
ded3ef0f RK |
27 | ldmfd sp!, {r0-r3, ip, lr} |
28 | ||
29 | .pushsection .rodata, "a" | |
30 | 1: .ascii KERN_DEBUG "VFP: \str\n" | |
31 | .byte 0 | |
32 | .previous | |
1da177e4 LT |
33 | #endif |
34 | .endm | |
35 | ||
36 | .macro DBGSTR1, str, arg | |
37 | #ifdef DEBUG | |
38 | stmfd sp!, {r0-r3, ip, lr} | |
39 | mov r1, \arg | |
ded3ef0f | 40 | ldr r0, =1f |
1da177e4 | 41 | bl printk |
ded3ef0f RK |
42 | ldmfd sp!, {r0-r3, ip, lr} |
43 | ||
44 | .pushsection .rodata, "a" | |
45 | 1: .ascii KERN_DEBUG "VFP: \str\n" | |
46 | .byte 0 | |
47 | .previous | |
1da177e4 LT |
48 | #endif |
49 | .endm | |
50 | ||
51 | .macro DBGSTR3, str, arg1, arg2, arg3 | |
52 | #ifdef DEBUG | |
53 | stmfd sp!, {r0-r3, ip, lr} | |
54 | mov r3, \arg3 | |
55 | mov r2, \arg2 | |
56 | mov r1, \arg1 | |
ded3ef0f | 57 | ldr r0, =1f |
1da177e4 | 58 | bl printk |
ded3ef0f RK |
59 | ldmfd sp!, {r0-r3, ip, lr} |
60 | ||
61 | .pushsection .rodata, "a" | |
62 | 1: .ascii KERN_DEBUG "VFP: \str\n" | |
63 | .byte 0 | |
64 | .previous | |
1da177e4 LT |
65 | #endif |
66 | .endm | |
67 | ||
68 | ||
@ VFP hardware support entry point.
@
@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
@  r2  = PC value to resume execution after successful emulation
@  r9  = normal "successful" return address
@  r10 = vfp_state union
@  r11 = CPU number
@  lr  = unrecognised instruction return address
@  IRQs enabled.
ENTRY(vfp_support_entry)
	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10

	ldr	r3, [sp, #S_PSR]	@ Neither lazy restore nor FP exceptions
	and	r3, r3, #MODE_MASK	@ are supported in kernel mode
	teq	r3, #USR_MODE
	bne	vfp_kmode_exception	@ Returns through lr

	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
	DBGSTR1	"fpexc %08x", r1
	tst	r1, #FPEXC_EN
	bne	look_for_VFP_exceptions	@ VFP is already enabled

	DBGSTR1	"enable %x", r10
	ldr	r3, vfp_current_hw_state_address
	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
	cmp	r4, r10			@ this thread owns the hw context?
#ifndef CONFIG_SMP
	@ For UP, checking that this thread owns the hw context is
	@ sufficient to determine that the hardware state is valid.
	beq	vfp_hw_state_valid

	@ On UP, we lazily save the VFP context.  As a different
	@ thread wants ownership of the VFP hardware, save the old
	@ state if there was a previous (valid) owner.

	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it

	DBGSTR1	"save old state %p", r4
	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
	beq	vfp_reload_hw		@ then the hw state needs reloading
	VFPFSTMIA r4, r5		@ save the working registers
	VFPFMRX	r5, FPSCR		@ current status
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to save?
	beq	1f
	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
	beq	1f
	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
1:
#endif
	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
vfp_reload_hw:

#else
	@ For SMP, if this thread does not own the hw context, then we
	@ need to reload it.  No need to save the old state as on SMP,
	@ we always save the state when we switch away from a thread.
	bne	vfp_reload_hw

	@ This thread has ownership of the current hardware context.
	@ However, it may have been migrated to another CPU, in which
	@ case the saved state is newer than the hardware context.
	@ Check this by looking at the CPU number which the state was
	@ last loaded onto.
	ldr	ip, [r10, #VFP_CPU]
	teq	ip, r11
	beq	vfp_hw_state_valid

vfp_reload_hw:
	@ We're loading this thread's state into the VFP hardware. Update
	@ the CPU number which contains the most up to date VFP context.
	str	r11, [r10, #VFP_CPU]

	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it
#endif

	DBGSTR1	"load state %p", r10
	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
					@ Load the saved state back into the VFP
	VFPFLDMIA r10, r5		@ reload the working registers while
					@ FPEXC is in a safe state
	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to restore?
	beq	1f
	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
	beq	1f
	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
1:
#endif
	VFPFMXR	FPSCR, r5		@ restore status

@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
	tst	r1, #FPEXC_EX
	bne	process_exception	@ might as well handle the pending
					@ exception before retrying branch
					@ out before setting an FPEXC that
					@ stops us reading stuff
	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
	sub	r2, r2, #4		@ Retry current instruction - if Thumb
	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
					@ else it's one 32-bit instruction, so
					@ always subtract 4 from the following
					@ instruction address.
#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
	str	r11, [r10, #TI_PREEMPT]
#endif
	mov	pc, r9			@ we think we have handled things


look_for_VFP_exceptions:
	@ Check for synchronous or asynchronous exception
	tst	r1, #FPEXC_EX | FPEXC_DEX
	bne	process_exception
	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
	@ causes all the CDP instructions to be bounced synchronously without
	@ setting the FPEXC.EX bit
	VFPFMRX	r5, FPSCR
	tst	r5, #FPSCR_IXE
	bne	process_exception

	@ Fall through - hand on to the next handler: this is an appropriate
	@ coprocessor instruction that was not recognised by the VFP
	DBGSTR	"not VFP"
#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
	str	r11, [r10, #TI_PREEMPT]
#endif
	mov	pc, lr

process_exception:
	DBGSTR	"bounce"
	mov	r2, sp			@ nothing stacked - regdump is at TOS
	mov	lr, r9			@ setup for a return to the user code.

	@ Now call the C code to package up the bounce to the support code
	@   r0 holds the trigger instruction
	@   r1 holds the FPEXC value
	@   r2 pointer to register dump
	b	VFP_bounce		@ we have handled this - the support
					@ code will raise an exception if
					@ required. If not, the user code will
					@ retry the faulted instruction
ENDPROC(vfp_support_entry)
1da177e4 | 228 | |
93ed3970 | 229 | ENTRY(vfp_save_state) |
c6428464 CM |
230 | @ Save the current VFP state |
231 | @ r0 - save location | |
232 | @ r1 - FPEXC | |
233 | DBGSTR1 "save VFP state %p", r0 | |
25ebee02 | 234 | VFPFSTMIA r0, r2 @ save the working registers |
c6428464 | 235 | VFPFMRX r2, FPSCR @ current status |
c98929c0 | 236 | tst r1, #FPEXC_EX @ is there additional state to save? |
24b647a0 CM |
237 | beq 1f |
238 | VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) | |
239 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? | |
240 | beq 1f | |
241 | VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) | |
242 | 1: | |
c6428464 CM |
243 | stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 |
244 | mov pc, lr | |
93ed3970 | 245 | ENDPROC(vfp_save_state) |
c6428464 | 246 | |
7eb25ebe | 247 | .align |
af61bdf0 RK |
248 | vfp_current_hw_state_address: |
249 | .word vfp_current_hw_state | |
1da177e4 | 250 | |
07f33a03 CM |
251 | .macro tbl_branch, base, tmp, shift |
252 | #ifdef CONFIG_THUMB2_KERNEL | |
253 | adr \tmp, 1f | |
254 | add \tmp, \tmp, \base, lsl \shift | |
255 | mov pc, \tmp | |
256 | #else | |
257 | add pc, pc, \base, lsl \shift | |
1da177e4 | 258 | mov r0, r0 |
07f33a03 CM |
259 | #endif |
260 | 1: | |
261 | .endm | |
262 | ||
ENTRY(vfp_get_float)
	@ r0 = single-precision register number; returns its value in r0.
	@ Each table entry is 8 bytes (one transfer insn + return).
	tbl_branch r0, r3, #3
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
	mov	pc, lr
	.org	1b + 8
1:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
	mov	pc, lr
	.org	1b + 8
	.endr
ENDPROC(vfp_get_float)
1da177e4 | 274 | |
93ed3970 | 275 | ENTRY(vfp_put_float) |
07f33a03 | 276 | tbl_branch r1, r3, #3 |
1da177e4 | 277 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
07f33a03 | 278 | 1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 |
1da177e4 | 279 | mov pc, lr |
07f33a03 CM |
280 | .org 1b + 8 |
281 | 1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 | |
1da177e4 | 282 | mov pc, lr |
07f33a03 | 283 | .org 1b + 8 |
1da177e4 | 284 | .endr |
93ed3970 | 285 | ENDPROC(vfp_put_float) |
1da177e4 | 286 | |
93ed3970 | 287 | ENTRY(vfp_get_double) |
07f33a03 | 288 | tbl_branch r0, r3, #3 |
1da177e4 | 289 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
07f33a03 | 290 | 1: fmrrd r0, r1, d\dr |
1da177e4 | 291 | mov pc, lr |
07f33a03 | 292 | .org 1b + 8 |
1da177e4 | 293 | .endr |
25ebee02 CM |
294 | #ifdef CONFIG_VFPv3 |
295 | @ d16 - d31 registers | |
296 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | |
07f33a03 | 297 | 1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr |
25ebee02 | 298 | mov pc, lr |
07f33a03 | 299 | .org 1b + 8 |
25ebee02 CM |
300 | .endr |
301 | #endif | |
1da177e4 | 302 | |
25ebee02 | 303 | @ virtual register 16 (or 32 if VFPv3) for compare with zero |
1da177e4 LT |
304 | mov r0, #0 |
305 | mov r1, #0 | |
306 | mov pc, lr | |
93ed3970 | 307 | ENDPROC(vfp_get_double) |
1da177e4 | 308 | |
93ed3970 | 309 | ENTRY(vfp_put_double) |
07f33a03 | 310 | tbl_branch r2, r3, #3 |
1da177e4 | 311 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
07f33a03 | 312 | 1: fmdrr d\dr, r0, r1 |
1da177e4 | 313 | mov pc, lr |
07f33a03 | 314 | .org 1b + 8 |
1da177e4 | 315 | .endr |
25ebee02 CM |
316 | #ifdef CONFIG_VFPv3 |
317 | @ d16 - d31 registers | |
318 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | |
138de1c4 | 319 | 1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr |
25ebee02 | 320 | mov pc, lr |
07f33a03 | 321 | .org 1b + 8 |
25ebee02 CM |
322 | .endr |
323 | #endif | |
93ed3970 | 324 | ENDPROC(vfp_put_double) |