/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES	(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

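/*
 * fx_finit() puts an fxsave image into its power-on default state:
 * 0x37f is the x87 control word after FINIT (all exceptions masked,
 * extended 64-bit precision, round to nearest), and MXCSR_DEFAULT is
 * the SSE equivalent.
 */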
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

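/*
 * With xsaveopt, state components that were in their init state at save
 * time may not have been written to the memory image at all, leaving
 * stale data from an earlier save.  __sanitize_i387_state() fills those
 * components with their init values so the image can be examined or
 * exported coherently.
 */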
extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

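/*
 * Both helpers evaluate to 0 on success and to -1 if the wrapped
 * instruction faults (the fault is fixed up through the exception
 * table).  user_insn() additionally brackets the instruction with
 * STAC/CLAC so that SMAP permits the access to user memory.
 */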
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * Must be called with preemption disabled.  Returns 'true' if the
 * FPU register state is still intact after the save.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header may indicate that the FP state is
		 * still in its init state, in which case there can be
		 * no pending x87 exceptions to clear below.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		/*
		 * fnsave reinitializes the FPU after saving, so the
		 * register contents are no longer valid afterwards.
		 */
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending.  Clear the x87 state here by setting
	 * them to fixed values.  The "m" operand can be any per-task
	 * variable that is likely to be in the L1 cache.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers.  Careful: these need to be preemption
 * protected *and* they need to be properly paired with the CR0.TS
 * changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget the coprocessor state.
	 */
	preempt_disable();
	tsk->fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

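/*
 * drop_init_fpu() is for paths that must end up with well-defined
 * register contents: in lazy mode it is enough to forget the state
 * (drop_fpu()), but in eager mode the FPU registers are always live,
 * so they are reloaded with the init state instead.
 */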
static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

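/*
 * A sketch of the intended calling pattern, mirroring __switch_to()
 * (the 'prev_p'/'next_p' naming is illustrative):
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch stacks, segments and other thread state ...
 *	switch_fpu_finish(next_p, fpu);
 */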
/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved by
 * switch_fpu_prepare() on the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

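/*
 * The lazy-restore test: the restore can be skipped only if this CPU
 * still holds 'new's register state.  Both checks are needed:
 * fpu_owner_task guards against another task having used the FPU on
 * this CPU, and last_cpu against 'new' having run (and saved newer
 * state) on a different CPU in the meantime.
 */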
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used math, preload the FPU: always on
	 * eager-FPU processors, otherwise only if the past five
	 * consecutive context switches used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the user state.  This function does not do any save/restore on its
 * own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

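/*
 * E.g. the sigreturn code grabs the FPU with user_fpu_begin()
 * immediately before reloading the registers from the signal frame;
 * in essence (a sketch, with 'restore_user_xstate' standing for
 * whatever helper performs the actual register load):
 *
 *	user_fpu_begin();
 *	if (restore_user_xstate(buf_fx, xstate_bv, fx_only))
 *		drop_init_fpu(tsk);
 */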
static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave())
		xsave_state(&tsk->thread.fpu.state->xsave, -1);
	else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		__save_fpu(tsk);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

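/*
 * Resulting ia32 frame layout (a sketch; addresses grow upward):
 *
 *	sp      -> struct i387_fsave_struct	(legacy fsave image)
 *	*buf_fx -> fxsave/xsave image, 64-byte aligned (followed by
 *		   FP_XSTATE_MAGIC2 when xsave is in use)
 */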
#endif