/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/i387.h>
#include <asm/xsave.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu__cpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU registers, they will still be
 * saved at the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

/*
 * Used to indicate that the FPU state in memory is newer than the FPU
 * state in registers, and the FPU state should be reloaded next time the
 * task is run. Only safe on the current task, or non-running tasks.
 */
static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
{
	tsk->thread.fpu.last_cpu = ~0;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
		cpu == new->thread.fpu.last_cpu;
}

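/*
 * Both conditions above are required (explanatory note, not new API):
 * the fpu_fpregs_owner_ctx check catches another task having used the
 * FPU on this CPU since 'new' last ran, while the last_cpu check
 * catches 'new' having run on a different CPU in the meantime, in which
 * case this CPU's registers are stale even though the owner pointer
 * still matches. A sketch of the intended use at context-switch time:
 *
 *	if (fpu_lazy_restore(next, cpu))
 *		;	// registers already hold next's state, skip reload
 *	else
 *		;	// reload from next->thread.fpu.state
 */
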
static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	/*
	 * 0x37f is the control word set by FNINIT: all x87 exceptions
	 * masked, extended precision, round-to-nearest.
	 */
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

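/*
 * How the fixup above works (explanatory note): _ASM_EXTABLE(1b, 3b)
 * records that a fault at label 1 (the wrapped instruction) should
 * resume at label 3, which stores -1 in 'err' and jumps back to label 2.
 * On the non-faulting path 'err' keeps the 0 it was seeded with by the
 * "0"(0) input constraint. user_insn() additionally brackets the
 * instruction with STAC/CLAC so SMAP permits the user-memory access;
 * check_insn() is the same pattern for kernel addresses. Typical use,
 * as in fxsave_user() below:
 *
 *	if (user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)))
 *		return -EFAULT;	// illustrative error handling only
 */
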
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

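/*
 * Why the "R" constraint makes the workaround above safe (explanatory
 * note): "R" restricts the operand to the eight legacy integer
 * registers (rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp), which can be used
 * for addressing without a REX prefix. The hand-written rex64 prefix is
 * then the only REX prefix on the instruction, so the assembler accepts
 * it and the 64-bit form of FXSAVE (with full 64-bit FIP/FDP) is
 * emitted.
 */
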
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		xsave_state(&fpu->state->xsave);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

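/*
 * Return-value summary (explanatory note): 0 means the FPU registers no
 * longer match the image just saved -- fnsave is destructive (it
 * reinitializes the x87 unit after saving), and fnclex changes the
 * status word after it was captured. Callers such as
 * switch_fpu_prepare() react to 0 by disabling lazy restore for the
 * task, so the state is reloaded from memory next time.
 */
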
static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

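/*
 * Background on X86_BUG_FXSAVE_LEAK (explanatory note): on the affected
 * AMD CPUs, FXSAVE/FXRSTOR leave the last FPU instruction and data
 * pointers (FIP/FDP/FOP) untouched unless an exception is pending, so
 * one task could observe another task's stale values. The fildl from a
 * kernel variable above overwrites those pointers with known contents
 * before the restore; any L1-resident kernel address works, has_fpu is
 * merely convenient.
 */
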
/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct fpu *fpu)
{
	fpu->has_fpu = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct fpu *fpu)
{
	fpu->has_fpu = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct fpu *fpu)
{
	__thread_clear_has_fpu(fpu);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct fpu *fpu)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(fpu);
}

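/*
 * CR0.TS background (explanatory note): while TS is set, the first FPU
 * instruction raises a #NM fault, which the lazy-FPU code uses to
 * restore state on demand -- hence stts() when a thread gives the FPU
 * up and clts() before it starts using it. In eager-FPU mode the state
 * is always switched at context-switch time, so TS is left clear and
 * both calls are skipped.
 */
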
static inline void drop_fpu(struct fpu *fpu)
{
	/*
	 * Forget the coprocessor state.
	 */
	preempt_disable();
	fpu->counter = 0;

	if (fpu->has_fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

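/*
 * About the fwait above (explanatory note): if user space left an x87
 * exception pending, it would otherwise fire at some arbitrary later
 * point. Executing fwait forces it to surface here, and the
 * _ASM_EXTABLE(1b, 2b) entry makes the resulting trap resume at label
 * 2, i.e. the exception is deliberately discarded along with the rest
 * of the state.
 */
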
static inline void restore_init_xstate(void)
{
	if (use_xsave())
		xrstor_state(init_xstate_buf, -1);
	else
		fxrstor_checking(&init_xstate_buf->i387);
}

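/*
 * The -1 above is the xstate component mask (explanatory note): all
 * bits set requests every state component, so XRSTOR reinitializes the
 * complete register state from the init image in init_xstate_buf.
 */
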
/*
 * Reset the FPU state in the eager case and drop it in the lazy case (later use
 * will reinit it).
 */
static inline void fpu_reset_state(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	if (!use_eager_fpu())
		drop_fpu(fpu);
	else
		restore_init_xstate();
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

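/*
 * Shape of a typical call site (a sketch modeled on __switch_to() in
 * process_32.c/process_64.c, shown here for orientation only):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...switch stacks, segments and the rest of the task state...
 *	switch_fpu_finish(next_p, fpu_switch);
 */
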
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	struct fpu *old_fpu = &old->thread.fpu;
	struct fpu *new_fpu = &new->thread.fpu;
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, otherwise only if the past 5
	 * consecutive context switches used the FPU.
	 */
	fpu.preload = new_fpu->fpstate_active &&
		      (use_eager_fpu() || new->thread.fpu.counter > 5);

	if (old_fpu->has_fpu) {
		if (!fpu_save_init(&old->thread.fpu))
			task_disable_lazy_fpu_restore(old);
		else
			old->thread.fpu.last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old->thread.fpu.has_fpu = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu.counter++;
			__thread_set_has_fpu(new_fpu);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu.counter = 0;
		task_disable_lazy_fpu_restore(old);
		if (fpu.preload) {
			new->thread.fpu.counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new_fpu);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			fpu_reset_state(new);
	}
}

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

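/*
 * Resulting signal-frame layout (illustration of the code above):
 *
 *	buf    -> struct i387_fsave_struct	(legacy fsave area,
 *						 ia32 frames only)
 *	buf_fx -> fxsave/xsave image		(xstate_sigframe_size() bytes)
 *
 * For native 64-bit frames buf == buf_fx and only the second part
 * exists.
 */
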
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the saved state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(fpu);
	preempt_enable();
}

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&tsk->thread.fpu.state->xsave);
		else
			xsave_state(&tsk->thread.fpu.state->xsave);
	} else
		fpu_fxsave(&tsk->thread.fpu);
}

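/*
 * Why the SYSTEM_BOOTING special case (explanatory note, to the best of
 * our reading): xsave_state() is implemented with alternatives-patched
 * instructions, and the alternatives have not been applied yet during
 * early boot, so xsave_state_booting() provides a variant that is safe
 * to run before patching.
 */
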
/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

extern void fpstate_cache_init(void);

extern int fpstate_alloc(struct fpu *fpu);
extern void fpstate_free(struct fpu *fpu);
extern int fpu__copy(struct task_struct *dst, struct task_struct *src);

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

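/*
 * Note on the round_down(..., 64) above: the XSAVE/XRSTOR instructions
 * require their memory operand to be 64-byte aligned, so the fxsave/
 * xsave part of the frame is aligned first and the optional legacy
 * fsave header for ia32 frames is placed below it.
 */
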
#endif