/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>

#include <linux/hardirq.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
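
/*
 * Usage sketch (added commentary, not part of the original file):
 * code that may run in atomic context typically guards its SIMD fast
 * path with irq_fpu_usable() and falls back to a scalar path:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX accelerated loop ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback ...
 *	}
 */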
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
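
/*
 * Usage sketch (added commentary, not part of the original file):
 * process context code can pair kernel_fpu_begin()/kernel_fpu_end()
 * directly; since kernel_fpu_begin() disables preemption, the section
 * in between must not sleep or schedule:
 *
 *	kernel_fpu_begin();
 *	... use FPU/SIMD registers ...
 *	kernel_fpu_end();
 */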
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
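
/*
 * Usage sketch (added commentary, not part of the original file):
 * irq_ts_save()/irq_ts_restore() bracket short regions that need
 * CR0::TS cleared without a full kernel_fpu_begin()/end() pair; the
 * VIA PadLock crypto driver uses this pattern around its xcrypt
 * instructions:
 *
 *	int ts_state = irq_ts_save();
 *	... instructions that fault if CR0::TS is set ...
 *	irq_ts_restore(ts_state);
 */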
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
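
/*
 * Note (added commentary, not part of the original file): the fsave
 * defaults above mirror the effect of FNINIT: cwd 0x037f masks all
 * exceptions and selects extended precision with round-to-nearest,
 * swd 0x0000 clears the status flags, and twd 0xffff tags all eight
 * registers as empty; the 0xffff upper halves fill the unused high
 * bits of the 16-bit fields stored in 32-bit save slots.
 */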
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(fpu);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(child_fpu);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpregs_deactivate(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
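
/*
 * Worked example (added commentary, not part of the original file):
 * the i387 tag word has two bits per register (00 valid, 01 zero,
 * 10 special, 11 empty), the FXSR tag byte one bit (1 = not empty).
 * For twd = 0xfffc (st0 valid, st1..st7 empty):
 *
 *	~twd			    = 0x0003
 *	(tmp | (tmp>>1)) & 0x5555   = 0x0001
 *	three folding steps	    = 0x0001, 0x0001, 0x0001
 *
 * so the function returns 0x01: only bit 0 set, marking st0 in use.
 */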
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}

	return ret;
}
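
/*
 * Classification note (added commentary, not part of the original
 * file): an 80-bit x87 value with exponent 0x7fff is Inf/NaN
 * ("special"), exponent 0x0000 with an all-zero significand is 0.0
 * ("zero"), and a normal value must have the explicit integer bit
 * set - bit 15 of significand[3], i.e. bit 63 of the significand -
 * otherwise it is an unnormal and also tagged "special".
 */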
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */