/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base)				\
BEGIN_FTR_SECTION						\
	b	2f;						\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);				\
	REST_32FPRS(n,base);					\
	b	3f;						\
2:	REST_32VSRS(n,c,base);					\
3:

#define __SAVE_32FPVSRS(n,c,base)				\
BEGIN_FTR_SECTION						\
	b	2f;						\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);				\
	SAVE_32FPRS(n,base);					\
	b	3f;						\
2:	SAVE_32VSRS(n,c,base);					\
3:
#else
#define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

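/*
 * SAVE_32FPVSRS/REST_32FPVSRS transfer the 32 floating-point registers
 * through whichever view of the register file the CPU provides.  The
 * feature sections above are patched at boot: when CPU_FTR_VSX is set
 * the branch to 2f is taken and all 32 VSRs are moved (FPR i occupies
 * the upper half of VSR i), otherwise only the classic FPRs are
 * touched.  The c argument is a scratch GPR the VSX forms use for
 * address generation, and the __REG_## pasting turns a symbolic name
 * such as R4 into the bare register number the underlying macros
 * expect, so callers write e.g. SAVE_32FPVSRS(0, R4, R3) to save
 * fr0-fr31 (or vsr0-vsr31) to the buffer addressed by r3.
 */
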
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_fpu(struct thread_struct *thread)
 *
 * This is similar to load_up_fpu but for the transactional version of the FP
 * register set.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, we don't do lazy FP with TM currently.
 */
_GLOBAL(do_load_up_transact_fpu)
	mfmsr	r6
	ori	r5,r6,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r5)

	addi	r7,r3,THREAD_TRANSACT_FPSTATE
	lfd	fr0,FPSTATE_FPSCR(r7)
	MTFSF_L(fr0)
	REST_32FPVSRS(0, R4, R7)

	/* FP/VSX off again */
	MTMSRD(r6)
	SYNC

	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

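/*
 * The sequence above is the standard pattern for touching FP state
 * from kernel code: MSR_FP (plus MSR_VSX where present) is turned on
 * just long enough for the register loads to execute, then the
 * original MSR saved in r6 is put back so the kernel continues in a
 * no-FP context.
 */
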
/*
 * Enable use of the FPU, and VSX if possible, for the caller.
 */
_GLOBAL(fp_enable)
	mfmsr	r3
	ori	r3,r3,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r3,r3,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r3)
	isync			/* (not necessary for arch 2.02 and later) */
	blr

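/*
 * fp_enable only flips MSR bits for the context it runs in; it does
 * not touch any task's saved MSR.  A minimal C-side sketch (the
 * prototype is assumed here for illustration):
 *
 *	extern void fp_enable(void);
 *
 *	fp_enable();
 *	load_fp_state(&current->thread.fp_state);
 *
 * i.e. enable FP in the kernel, then use the accessors below.
 */
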
/*
 * Load state from memory into FP registers including FPSCR.
 * Assumes the caller has enabled FP in the MSR.
 */
_GLOBAL(load_fp_state)
	lfd	fr0,FPSTATE_FPSCR(r3)
	MTFSF_L(fr0)
	REST_32FPVSRS(0, R4, R3)
	blr

/*
 * Store FP state into memory, including FPSCR.
 * Assumes the caller has enabled FP in the MSR.
 */
_GLOBAL(store_fp_state)
	SAVE_32FPVSRS(0, R4, R3)
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r3)
	blr

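/*
 * Seen from C, these two are the low-level accessors for a
 * struct thread_fp_state; a sketch, with the prototypes assumed
 * rather than quoted from a header:
 *
 *	extern void load_fp_state(struct thread_fp_state *fp);
 *	extern void store_fp_state(struct thread_fp_state *fp);
 *
 * r3 carries the pointer, and FPSTATE_FPSCR is the asm-offsets
 * constant for the fpscr field within that structure.
 */
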
/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_math)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_math)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f
	toreal(r4)
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	addi	r10,r4,THREAD_FPSTATE
	SAVE_32FPVSRS(0, R5, R10)
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r10)
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
#endif
	addi	r10,r5,THREAD_FPSTATE
	lfd	fr0,FPSTATE_FPSCR(r10)
	MTFSF_L(fr0)
	REST_32FPVSRS(0, R4, R10)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	blr

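/*
 * Note that load_up_fpu does not write MSR_FP to the MSR on behalf of
 * the interrupted task; it edits the MSR image that the exception
 * return path will restore: r9 on 32-bit (consumed via
 * fast_exception_return) and r12/_MSR(r1) on 64-bit.  FP only becomes
 * live again once the task resumes.
 */
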
/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	PPC_LCMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r6,0
	bne	2f
	addi	r6,r3,THREAD_FPSTATE
2:	PPC_LCMPI	0,r5,0
	SAVE_32FPVSRS(0, R4, R6)
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r6)
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r3,r3,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	andc	r4,r4,r3		/* disable FP for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_math)
	PPC_STL	r5,ADDROFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
	blr
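
/*
 * giveup_fpu is the other half of the scheme described in load_up_fpu:
 * on SMP, switch_to() calls it for the outgoing task so the register
 * image always ends up in memory; on UP it also clears
 * last_task_used_math.  The THREAD_FPSAVEAREA check above lets a
 * caller redirect the save into an alternate buffer, falling back to
 * the task's own fp_state when none is set.
 */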

/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 */

_GLOBAL(cvt_fd)
	lfs	0,0(r3)
	stfd	0,0(r4)
	blr

_GLOBAL(cvt_df)
	lfd	0,0(r3)
	stfs	0,0(r4)
	blr