/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			   xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			   xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES	(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

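/*
 * The use_*() predicates above compile down to boot-time patched static
 * branches: static_cpu_has_safe() is resolved through the alternatives
 * mechanism once CPU features are known, so each test costs a patched
 * jump rather than a load from the cpu feature bitmap. Illustrative
 * dispatch pattern (fpu_save_init() below is the real instance):
 *
 *	if (use_xsave())
 *		fpu_xsave(fpu);
 *	else if (use_fxsr())
 *		fpu_fxsave(fpu);
 */
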
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

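/*
 * user_insn() wraps an instruction that touches user memory: the access
 * is bracketed with STAC/CLAC so it is legal under SMAP, and an
 * exception-table fixup converts a fault into err = -1 instead of an
 * oops. check_insn() is the same fixup pattern for kernel-memory
 * operands, with no SMAP bracketing. Typical use, as in fxsave_user()
 * below:
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 */
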
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			      : "=m" (fpu->state->fxsave)
			      : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * These must be called with preemption disabled. They return
 * 'true' if the FPU state is still intact after the save.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

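/*
 * The return value matters for lazy FPU switching: when fpu_save_init()
 * returns 0 the register contents were clobbered by the save (FNSAVE
 * reinitializes the FPU) or by clearing pending exceptions, so
 * switch_fpu_prepare() below records last_cpu = ~0 and fpu_lazy_restore()
 * can never skip the reload. When it returns 1 the registers still match
 * the saved image, and a later switch back to the same task on the same
 * CPU may avoid the restore entirely.
 */
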
static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The "m" operand of the fildl below is an arbitrary variable that is
	 * expected to be hot in the L1 cache, so the dummy load is cheap.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need preemption
 * protection *and* they need to be properly paired with the
 * CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so do try to avoid using these on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	preempt_disable();
	tsk->thread.fpu_counter = 0;
	__drop_fpu(tsk);
	clear_stopped_child_used_math(tsk);
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved on the
 * next context switch (the save path keys off fpu.has_fpu, not
 * fpu_owner_task).
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
	       cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used math, pre-load the FPU: always on
	 * eager-FPU processors, and on lazy-FPU processors if the
	 * past 5 consecutive context switches all used math.
	 */
	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
					     new->thread.fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->thread.fpu_counter++;
			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}

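/*
 * Illustrative pairing of the two stages, roughly as the x86 context
 * switch (__switch_to) is expected to use them (a sketch, not the exact
 * upstream code):
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...  switch stacks, TLS, segment state, etc.  ...
 *	switch_fpu_finish(next_p, fpu);
 */
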
/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. It does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
		else
			xsave_state(&tsk->thread.fpu.state->xsave, -1);
	} else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * These disable preemption on their own and are therefore safe
 * to call directly.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_eager_fpu()) {
		__save_fpu(tsk);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

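/*
 * Sketch of the expected fork-time usage (the real caller is the arch
 * task-struct duplication path; the exact surrounding code is
 * illustrative):
 *
 *	if (tsk_used_math(src) && fpu_alloc(&dst->thread.fpu) == 0)
 *		fpu_copy(dst, src);
 *
 * i.e. allocate a state buffer for the child first, then snapshot the
 * parent's FPU state into it; fpu_free() releases the buffer when the
 * task dies.
 */
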
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}

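/*
 * Illustrative flow for carving the FPU area out of a signal frame (a
 * sketch of how get_sigframe()-style code is expected to combine these
 * helpers, not the exact upstream code):
 *
 *	unsigned long buf_fx, math_size;
 *	unsigned long sp = regs->sp;
 *
 *	sp = alloc_mathframe(sp, is_ia32_frame(), &buf_fx, &math_size);
 *	...  align sp and build the rest of the frame  ...
 *	if (save_xstate_sig((void __user *)sp, (void __user *)buf_fx,
 *			    math_size) < 0)
 *		return (void __user *)-1L;
 */
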
#endif