/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif
extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;
/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active
#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP		(boot_cpu_data.hard_math)
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
# define HAVE_HWFP		1
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
static inline int is_ia32_compat_frame(void)
{
        return config_enabled(CONFIG_IA32_EMULATION) &&
               test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
        return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
        return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}
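/*
 * Illustrative sketch (not part of this header): the signal-delivery
 * code uses the helpers above to pick a frame format, roughly along
 * the lines of
 *
 *	if (is_ia32_frame())
 *		return ia32_setup_frame(usig, ka, cset, regs);
 *	else if (is_x32_frame())
 *		return x32_setup_rt_frame(usig, ka, info, cset, regs);
 *	else
 *		return __setup_rt_frame(sig, ka, info, set, regs);
 *
 * The exact argument lists here are assumptions for illustration;
 * see the real dispatch in arch/x86/kernel/signal.c.
 */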
#define X87_FSW_ES	(1 << 7)	/* Exception Summary */
static __always_inline __pure bool use_eager_fpu(void)
{
        return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
        memset(fx, 0, xstate_size);
        fx->cwd = 0x37f;
        fx->mxcsr = MXCSR_DEFAULT;
}
extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
        if (!use_xsaveopt())
                return;
        __sanitize_i387_state(tsk);
}
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
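/*
 * How the two macros above behave, using fsave_user() below as an
 * illustration (a sketch of the generated code, not literal
 * preprocessor output):
 *
 *	1: fnsave (%reg); fwait		# the faulting candidate
 *	2: ...				# normal path, err == 0
 *	.section .fixup
 *	3: movl $-1, err; jmp 2b	# fault path
 *	.previous
 *	_ASM_EXTABLE(1b, 3b)		# route a fault at 1b to 3b
 *
 * user_insn() additionally brackets the instruction with
 * ASM_STAC/ASM_CLAC so that, under SMAP, user pages are accessible
 * only for the duration of the access.
 */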
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
        return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        if (config_enabled(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else if (config_enabled(CONFIG_AS_FXSAVEQ))
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

        /* See comment in fpu_fxsave() below. */
        return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        if (config_enabled(CONFIG_X86_32))
                return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else if (config_enabled(CONFIG_AS_FXSAVEQ))
                return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

        /* See comment in fpu_fxsave() below. */
        return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
                          "m" (*fx));
}
static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
        if (config_enabled(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else if (config_enabled(CONFIG_AS_FXSAVEQ))
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

        /* See comment in fpu_fxsave() below. */
        return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
                         "m" (*fx));
}
static inline int frstor_checking(struct i387_fsave_struct *fx)
{
        return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void fpu_fxsave(struct fpu *fpu)
{
        if (config_enabled(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
        else if (config_enabled(CONFIG_AS_FXSAVEQ))
                asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
        else {
                /* Using "rex64; fxsave %0" is broken because, if the memory
                 * operand uses any extended registers for addressing, a second
                 * REX prefix will be generated (to the assembler, rex64
                 * followed by semicolon is a separate instruction), and hence
                 * the 64-bitness is lost.
                 *
                 * Using "fxsaveq %0" would be the ideal choice, but is only
                 * supported starting with gas 2.16.
                 *
                 * Using, as a workaround, the properly prefixed form below
                 * isn't accepted by any binutils version so far released,
                 * complaining that the same type of prefix is used twice if
                 * an extended register is needed for addressing (fix submitted
                 * to mainline 2005-11-21).
                 *
                 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
                 *
                 * This, however, we can work around by forcing the compiler to
                 * select an addressing mode that doesn't require extended
                 * registers.
                 */
                asm volatile( "rex64/fxsave (%[fx])"
                             : "=m" (fpu->state->fxsave)
                             : [fx] "R" (&fpu->state->fxsave));
        }
}
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
        if (use_xsave()) {
                fpu_xsave(fpu);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
                        return 1;
        } else if (use_fxsr()) {
                fpu_fxsave(fpu);
        } else {
                asm volatile("fnsave %[fx]; fwait"
                             : [fx] "=m" (fpu->state->fsave));
                return 0;
        }

        /*
         * If exceptions are pending, we need to clear them so
         * that we don't randomly get exceptions later.
         *
         * FIXME! Is this perhaps only true for the old-style
         * irq13 case? Maybe we could leave the x87 state
         * intact otherwise?
         */
        if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
                asm volatile("fnclex");
                return 0;
        }
        return 1;
}
static inline int __save_init_fpu(struct task_struct *tsk)
{
        return fpu_save_init(&tsk->thread.fpu);
}
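/*
 * Illustrative caller pattern (sketch): per the comment above
 * fpu_save_init(), these run with preemption disabled, e.g.
 *
 *	preempt_disable();
 *	if (__thread_has_fpu(tsk))
 *		state_intact = __save_init_fpu(tsk);
 *	preempt_enable();
 *
 * 'state_intact' is a hypothetical local used only for this example.
 */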
static inline int fpu_restore_checking(struct fpu *fpu)
{
        if (use_xsave())
                return fpu_xrstor_checking(&fpu->state->xsave);
        else if (use_fxsr())
                return fxrstor_checking(&fpu->state->fxsave);
        else
                return frstor_checking(&fpu->state->fsave);
}
static inline int restore_fpu_checking(struct task_struct *tsk)
{
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending. Clear the x87 state here by setting it to fixed
           values. "m" is a random variable that should be in L1 */
        alternative_input(
                ASM_NOP8 ASM_NOP2,
                "emms\n\t"		/* clear stack tags */
                "fildl %P[addr]",	/* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (tsk->thread.fpu.has_fpu));

        return fpu_restore_checking(&tsk->thread.fpu);
}
/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
        return tsk->thread.fpu.has_fpu;
}
/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
        tsk->thread.fpu.has_fpu = 0;
        this_cpu_write(fpu_owner_task, NULL);
}
/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
        tsk->thread.fpu.has_fpu = 1;
        this_cpu_write(fpu_owner_task, tsk);
}
/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * do try to avoid using these on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
        __thread_clear_has_fpu(tsk);
        if (!use_eager_fpu())
                stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
        if (!use_eager_fpu())
                clts();
        __thread_set_has_fpu(tsk);
}
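/*
 * Illustrative pairing (sketch): in lazy mode a begin/end sequence
 * bundles the CR0.TS toggle with the software flag, under the
 * preemption protection noted above:
 *
 *	preempt_disable();
 *	__thread_fpu_begin(tsk);	// clts() + has_fpu = 1
 *	...				// FPU registers now live
 *	__thread_fpu_end(tsk);		// has_fpu = 0 + stts()
 *	preempt_enable();
 */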
static inline void __drop_fpu(struct task_struct *tsk)
{
        if (__thread_has_fpu(tsk)) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                __thread_fpu_end(tsk);
        }
}
static inline void drop_fpu(struct task_struct *tsk)
{
        /*
         * Forget coprocessor state..
         */
        preempt_disable();
        tsk->fpu_counter = 0;
        __drop_fpu(tsk);
        clear_used_math();
        preempt_enable();
}
static inline void drop_init_fpu(struct task_struct *tsk)
{
        if (!use_eager_fpu())
                drop_fpu(tsk);
        else {
                if (use_xsave())
                        xrstor_state(init_xstate_buf, -1);
                else
                        fxrstor_checking(&init_xstate_buf->i387);
        }
}
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;
/*
 * FIXME! We could do a totally lazy restore, but we need to
 * add a per-cpu "this was the task that last touched the FPU
 * on this CPU" variable, and the task needs to have a "I last
 * touched the FPU on this CPU" and check them.
 *
 * We don't do that yet, so "fpu_lazy_restore()" always returns
 * false, but some day..
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
        return new == this_cpu_read_stable(fpu_owner_task) &&
                cpu == new->thread.fpu.last_cpu;
}
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
        fpu_switch_t fpu;

        /*
         * If the task has used the math, pre-load the FPU on xsave processors
         * or if the past 5 consecutive context-switches used math.
         */
        fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
                                             new->fpu_counter > 5);
        if (__thread_has_fpu(old)) {
                if (!__save_init_fpu(old))
                        cpu = ~0;
                old->thread.fpu.last_cpu = cpu;
                old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
                        new->fpu_counter++;
                        __thread_set_has_fpu(new);
                        prefetch(new->thread.fpu.state);
                } else if (!use_eager_fpu())
                        stts();
        } else {
                old->fpu_counter = 0;
                old->thread.fpu.last_cpu = ~0;
                if (fpu.preload) {
                        new->fpu_counter++;
                        if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
                                fpu.preload = 0;
                        else
                                prefetch(new->thread.fpu.state);
                        __thread_fpu_begin(new);
                }
        }
        return fpu;
}
/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
        if (fpu.preload) {
                if (unlikely(restore_fpu_checking(new)))
                        drop_init_fpu(new);
        }
}
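/*
 * Illustrative use of the two-stage API in the scheduler (sketch;
 * see __switch_to() in arch/x86/kernel/process_*.c for the real
 * call sites):
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...				// switch stacks, segments, TLS
 *	switch_fpu_finish(next_p, fpu);
 */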
/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
static inline int xstate_sigframe_size(void)
{
        return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}
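/*
 * Layout implied by the size computed above (sketch): with xsave, the
 * signal frame carries the full xstate image followed by a magic
 * trailer; otherwise just the legacy fxsave/fsave image:
 *
 *	[ xstate image: xstate_size bytes          ]
 *	[ FP_XSTATE_MAGIC2: FP_XSTATE_MAGIC2_SIZE  ]	// xsave only
 */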
static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
        void __user *buf_fx = buf;
        int size = xstate_sigframe_size();

        if (ia32_frame && use_fxsr()) {
                buf_fx = buf + sizeof(struct i387_fsave_struct);
                size += sizeof(struct i387_fsave_struct);
        }

        return __restore_xstate_sig(buf, buf_fx, size);
}
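/*
 * Frame layout assumed by restore_xstate_sig() above for ia32 frames
 * (sketch): a legacy fsave header is prepended before the extended
 * state, so the fx image starts just past it:
 *
 *	buf    -> struct i387_fsave_struct	// ia32 compat header
 *	buf_fx -> fxsave/xsave image		// xstate_sigframe_size() bytes
 */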
/*
 * Need to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * it. This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
        preempt_disable();
        if (!user_has_fpu())
                __thread_fpu_begin(current);
        preempt_enable();
}
static inline void __save_fpu(struct task_struct *tsk)
{
        if (use_xsave())
                xsave_state(&tsk->thread.fpu.state->xsave, -1);
        else
                fpu_fxsave(&tsk->thread.fpu);
}
/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        WARN_ON_ONCE(!__thread_has_fpu(tsk));

        if (use_eager_fpu()) {
                __save_fpu(tsk);
                return;
        }

        preempt_disable();
        __save_init_fpu(tsk);
        __thread_fpu_end(tsk);
        preempt_enable();
}
/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
        }
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
        }
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.fpu.state->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}
static bool fpu_allocated(struct fpu *fpu)
{
        return fpu->state != NULL;
}
static inline int fpu_alloc(struct fpu *fpu)
{
        if (fpu_allocated(fpu))
                return 0;
        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
        if (!fpu->state)
                return -ENOMEM;
        WARN_ON((unsigned long)fpu->state & 15);
        return 0;
}
static inline void fpu_free(struct fpu *fpu)
{
        if (fpu->state) {
                kmem_cache_free(task_xstate_cachep, fpu->state);
                fpu->state = NULL;
        }
}
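/*
 * Illustrative lifecycle (sketch): fork-time duplication of the FPU
 * state area would pair these helpers roughly as
 *
 *	if (fpu_alloc(&dst->thread.fpu))
 *		return -ENOMEM;
 *	fpu_copy(dst, src);
 *	...
 *	fpu_free(&dst->thread.fpu);	// on task teardown
 *
 * See arch_dup_task_struct() for the real call site.
 */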
static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
        if (use_eager_fpu()) {
                memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
                __save_fpu(dst);
        } else {
                struct fpu *dfpu = &dst->thread.fpu;
                struct fpu *sfpu = &src->thread.fpu;

                unlazy_fpu(src);
                memcpy(dfpu->state, sfpu->state, xstate_size);
        }
}
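/*
 * Illustrative use of alloc_mathframe() below (sketch; the locals here
 * are hypothetical): a signal-setup path carves space for the math
 * frame and later fills it with save_xstate_sig():
 *
 *	unsigned long buf_fx = 0, math_size = 0;
 *
 *	sp = alloc_mathframe(sp, is_ia32_frame(), &buf_fx, &math_size);
 *	...
 *	if (save_xstate_sig((void __user *)sp, (void __user *)buf_fx,
 *			    math_size))
 *		return (void __user *)-1L;
 *
 * See get_sigframe() in arch/x86/kernel/signal.c for the real version.
 */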
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
                unsigned long *size)
{
        unsigned long frame_size = xstate_sigframe_size();

        *buf_fx = sp = round_down(sp - frame_size, 64);
        if (ia32_frame && use_fxsr()) {
                frame_size += sizeof(struct i387_fsave_struct);
                sp -= sizeof(struct i387_fsave_struct);
        }

        *size = frame_size;
        return sp;
}

#endif