/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

#define MXCSR_DEFAULT		0x1f80

extern unsigned int mxcsr_feature_mask;
extern void fpu__init_cpu(void);
extern void eager_fpu_init(void);

extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);

extern void fpu__activate_curr(struct fpu *fpu);
extern void fpstate_init(struct fpu *fpu);
extern void fpu__clear(struct task_struct *tsk);

extern int  dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void fpu__restore(void);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == regset_fpregs_active. Please refer to the comment
 * at the definition of regset_fpregs_active.
 */
#define xstateregs_active	regset_fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

/*
 * Must be run with preemption disabled: this clears the
 * fpu_fpregs_owner_ctx on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
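
/*
 * Illustrative note: both halves of the check above must hold before a
 * register reload may be skipped:
 *
 *	fpu == fpu_fpregs_owner_ctx	this CPU's registers were last
 *					loaded from this task's fpstate
 *	cpu == fpu->last_cpu		and that load happened on this
 *					CPU and has not been invalidated
 *					since
 *
 * switch_fpu_prepare() below uses this test to clear fpu.preload and
 * reuse the values still sitting in the FPU registers.
 */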

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
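
/*
 * Worked example: the two constants above decode to the architectural
 * power-on defaults of both units:
 *
 *	0x37f	x87 control word: low six bits set = all x87 exceptions
 *		masked, PC (bits 9:8) = 11b = 64-bit extended precision,
 *		RC (bits 11:10) = 00b = round to nearest.
 *	0x1f80	MXCSR: bits 12:7 set = all SSE exceptions masked, no
 *		status flags set, round to nearest.
 */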

extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

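/*
 * Illustrative sketch (the operand choice below is hypothetical): the
 * only difference between the two macros is that user_insn() brackets
 * the instruction in ASM_STAC/ASM_CLAC so it may touch user memory
 * under SMAP; check_insn() is for kernel buffers. A use like
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 *
 * expands to roughly:
 *
 *	stac				# allow user accesses (SMAP)
 *	1:	fxsave (%rax)		# the instruction itself
 *	2:	clac			# forbid user accesses again
 *	...
 *	.fixup:	3: movl $-1,%[err]; jmp 2b
 *	__ex_table entry: 1b -> 3b
 *
 * so a fault inside the instruction lands in the fixup code and the
 * macro evaluates to -1 instead of 0.
 */
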
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a
		 * second REX prefix will be generated (to the assembler,
		 * rex64 followed by semicolon is a separate instruction),
		 * and hence the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix
		 * submitted to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler
		 * to select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		xsave_state(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		fpu_fxsave(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state.fsave));

	return 0;
}

extern void fpu__save(struct fpu *fpu);

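/*
 * Illustrative sketch (assumed shape of the out-of-line fpu__save(),
 * which lives in fpu/core.c): a save path pairs copy_fpregs_to_fpstate()
 * with deactivation whenever FNSAVE destroyed the registers, roughly:
 *
 *	preempt_disable();
 *	if (fpu->fpregs_active) {
 *		if (!copy_fpregs_to_fpstate(fpu))
 *			fpregs_deactivate(fpu);
 *	}
 *	preempt_enable();
 *
 * i.e. a 0 return forces the task through a fresh restore before it can
 * use the FPU again.
 */
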
static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state.xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state.fxsave);
	else
		return frstor_checking(&fpu->state.fsave);
}

static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpu->fpregs_active));
	}

	return __copy_fpstate_to_fpregs(fpu);
}

/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}

/* Must be paired with an 'stts' (__fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' (__fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work;
 * try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
static inline void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

static inline void restore_init_xstate(void)
{
	if (use_xsave())
		xrstor_state(&init_xstate_ctx, -1);
	else
		fxrstor_checking(&init_xstate_ctx.i387);
}

/*
 * Reset the FPU state back to init state.
 */
static inline void fpu__reset(struct fpu *fpu)
{
	if (!use_eager_fpu())
		fpu__drop(fpu);
	else
		restore_init_xstate();
}

/*
 * Definitions for the eXtended Control Register instructions
 */

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}

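/*
 * Usage sketch: index 0 is XCR0, the xfeature enabled mask. The xstate
 * init code reads and extends it along these lines (the feature bits
 * shown are illustrative):
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | XSTATE_FP | XSTATE_SSE);
 *
 * The .byte encodings above are used because older assemblers do not
 * know the xgetbv/xsetbv mnemonics.
 */
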
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used math, pre-load the FPU: always on
	 * eager/xsave processors, or if the past 5 consecutive
	 * context-switches used math.
	 */
	fpu.preload = new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload) {
		if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
			fpu__reset(new_fpu);
	}
}

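/*
 * Illustrative sketch (simplified from the era's __switch_to() in
 * arch/x86/kernel/process_64.c): the scheduler pairs the two stages
 * around the actual stack/register switch:
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	...				// switch stacks, TLS, etc.
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 *
 * prepare() runs while 'prev' is still current; finish() runs once
 * 'next' is current, and restores registers only if .preload was set.
 */
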
/*
 * Signal frame handlers...
 */
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state.fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state.fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

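/*
 * Usage sketch (modelled on the x87 trap handling in traps.c): the #MF
 * handler uses these accessors to find the unmasked exception that
 * fired, roughly:
 *
 *	unsigned short cwd = get_fpu_cwd(task);
 *	unsigned short swd = get_fpu_swd(task);
 *	unsigned short err = swd & ~cwd & 0x3f;
 *
 * 'err' is the set of pending-and-unmasked bits, since the low six bits
 * of CWD mask exceptions and the corresponding SWD bits report them.
 */
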
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

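/*
 * Worked example: tracing alloc_mathframe(sp, 1, &buf_fx, &size) on an
 * FXSR-capable CPU:
 *
 *	buf_fx = round_down(sp - frame_size, 64)
 *		64-byte aligned fxsave/xsave area of
 *		xstate_sigframe_size() bytes
 *	returned sp = buf_fx - sizeof(struct i387_fsave_struct)
 *		legacy fsave header placed just below it
 *	*size = frame_size + sizeof(struct i387_fsave_struct)
 *
 * which is exactly the layout restore_xstate_sig() above unpacks when
 * ia32_frame is set.
 */
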
#endif /* _ASM_X86_FPU_INTERNAL_H */