x86, signal: Cleanup ifdefs and is_ia32, is_x32
arch/x86/include/asm/fpu-internal.h
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

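/*
 * Illustrative sketch only (not part of this header): with the helpers
 * above, signal-frame setup can pick the frame format at run time instead
 * of hiding the choice behind #ifdefs, roughly:
 *
 *	if (is_ia32_frame())
 *		return ia32_setup_rt_frame(sig, ka, info, cset, regs);
 *	else if (is_x32_frame())
 *		return x32_setup_rt_frame(sig, ka, info, cset, regs);
 *	else
 *		return __setup_rt_frame(sig, ka, info, set, regs);
 *
 * Function names and arguments here are assumptions for illustration; the
 * real dispatch lives in arch/x86/kernel/signal.c.
 */
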
#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
		asm volatile("rex64/fxsave %0"
			     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs);

#else  /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#define ia32_setup_frame	__setup_frame
#define ia32_setup_rt_frame	__setup_rt_frame

#endif	/* CONFIG_X86_64 */

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * do try to avoid using these on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();
	__thread_set_has_fpu(tsk);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * FIXME! We could do a totally lazy restore, but we need to
 * add a per-cpu "this was the task that last touched the FPU
 * on this CPU" variable, and the task needs to have a "I last
 * touched the FPU on this CPU" and check them.
 *
 * We don't do that yet, so "fpu_lazy_restore()" always returns
 * false, but some day..
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
	       cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			__thread_fpu_end(new);
	}
}
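
/*
 * Illustrative sketch only: how a context-switch path is expected to pair
 * the two halves above. The real caller is __switch_to() in
 * arch/x86/kernel/process_*.c; the outline below is a simplified assumption:
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch stacks, segments, thread-local state ...
 *	switch_fpu_finish(next_p, fpu);
 *
 * switch_fpu_prepare() runs while "prev_p" is still the current task and
 * may set CR0.TS; switch_fpu_finish() then restores "next_p"'s register
 * state only if fpu.preload was set.
 */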

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

/*
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline void user_fpu_end(void)
{
	preempt_disable();
	__thread_fpu_end(current);
	preempt_enable();
}

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
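
/*
 * Illustrative sketch only: the NOTE above implies a pairing roughly like
 *
 *	user_fpu_begin();
 *	... restore the FP state from the signal frame,
 *	    e.g. via restore_i387_xstate() ...
 *
 * on the restore side, and
 *
 *	... save the FP state to the signal frame,
 *	    e.g. via save_i387_xstate() ...
 *	user_fpu_end();
 *
 * on the save side. Neither helper saves or restores register contents
 * itself; this ordering is an assumption drawn from the NOTE above.
 */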

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}
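
/*
 * Illustrative sketch only: a typical consumer of the accessors above is
 * the x87 exception handler, which masks the status word with the control
 * word to find the unmasked, pending exceptions (a simplified assumption
 * modelled on math_error() in arch/x86/kernel/traps.c):
 *
 *	unsigned short cwd = get_fpu_cwd(task);
 *	unsigned short swd = get_fpu_swd(task);
 *	unsigned short err = swd & ~cwd & 0x3f;
 */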

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}
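
/*
 * Illustrative sketch only: the allocation helpers above are aimed at the
 * fork path, roughly (an assumption modelled on arch_dup_task_struct() in
 * arch/x86/kernel/process.c):
 *
 *	if (fpu_allocated(&src->thread.fpu)) {
 *		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
 *		err = fpu_alloc(&dst->thread.fpu);
 *		if (err)
 *			return err;
 *		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 *
 * fpu_free() is the matching teardown when the task's FPU state is
 * released.
 */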

extern void fpu_finit(struct fpu *fpu);

#endif