/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif
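
/*
 * On 32-bit kernels the ia32_* names above are plain aliases for the
 * native frame-setup helpers, so callers can use the ia32_* entry
 * points unconditionally.
 */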

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP		(boot_cpu_data.hard_math)
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
# define HAVE_HWFP		1
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

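/*
 * Helpers that tell which signal frame format the current task gets:
 * a native 32-bit frame, an ia32 compat frame on a 64-bit kernel, or
 * an x32 frame.
 */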
static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES		(1 << 7)	/* Exception Summary */

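/*
 * In eager FPU mode the FPU state is saved and restored unconditionally
 * on every context switch.  In (traditional) lazy mode, CR0.TS is set
 * instead and the restore is deferred until the first FPU instruction
 * traps with a device-not-available (#NM) fault.
 */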
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;	/* x87 init: all exceptions masked, 64-bit
				   precision, round to nearest */
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

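/*
 * xsaveopt may skip writing state components that are in their init
 * state, so the in-memory image can hold stale contents;
 * __sanitize_i387_state() rewrites those areas with valid init values
 * so that consumers of the memory image (e.g. ptrace) see consistent
 * state.
 */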
static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
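
/*
 * user_insn() wraps a single instruction that touches user memory:
 * ASM_STAC/ASM_CLAC open and close a SMAP window around it, and the
 * exception-table fixup turns a fault into err == -1 instead of an
 * oops.  A typical use, as in fxsave_user() below:
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 */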

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
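
/*
 * check_insn() is the kernel-memory counterpart of user_insn(): the
 * same exception-table fixup yielding -1 on a fault, but without the
 * STAC/CLAC pair since no user access is involved.
 */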

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * Must be called with preemption disabled. Returns 'true' if the
 * FPU state is still intact after the save.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		/* FNSAVE also reinitializes the FPU, so the register
		   state is not preserved across the save: */
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending.  Clear the x87 state here by setting
	 * it to fixed values.  The "m" operand is a variable that
	 * should already be in the L1 cache.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers.  Careful: these need preemption
 * protection *and* they need to be properly paired with the
 * CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}
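
/*
 * Note that in eager FPU mode CR0.TS stays clear permanently, so
 * __thread_fpu_begin/end only flip the ownership flag; the
 * clts()/stts() dance is needed only for lazy switching.
 */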

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state.
	 */
	preempt_disable();
	tsk->fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		/*
		 * Eager mode keeps the registers live, so actually load
		 * the init state into them instead of just forgetting it.
		 */
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * fpu_lazy_restore() returns true if the task's FPU state is still
 * resident in this CPU's registers: the per-cpu fpu_owner_task must
 * still point at the task, and the task must have last used the FPU
 * on this very CPU.
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * Preload the FPU if the task has used math and we are either
	 * in eager FPU mode or the past five consecutive context
	 * switches used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
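
/*
 * The two halves pair up in the scheduler's __switch_to(), roughly:
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch stacks/segments/etc. ...
 *	switch_fpu_finish(next_p, fpu);
 */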

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}
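
/*
 * For an ia32 frame on an FXSR-capable CPU, the signal frame starts
 * with a legacy i387_fsave_struct header and the fxsave/xsave image
 * follows right after it; buf_fx above points at that second area.
 */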

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state.  This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave())
		xsave_state(&tsk->thread.fpu.state->xsave, -1);
	else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * Disables preemption on its own, so this is safe to call
 * with preemption enabled.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		__save_fpu(tsk);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	/* The FXSAVE image must be 16-byte aligned: */
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		/*
		 * In eager mode the registers hold src's live state, so
		 * saving them straight into dst's area does the copy.
		 */
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}
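
/*
 * The sp returned by alloc_mathframe() is the lowest address of the
 * math frame; for an ia32 frame that is the legacy fsave header, with
 * *buf_fx pointing at the 64-byte-aligned fxsave/xsave area above it.
 * This mirrors the split that restore_xstate_sig() expects.
 */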

#endif	/* _FPU_INTERNAL_H */