/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES	(1 << 7)	/* Exception Summary */

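/*
 * The use_*() helpers below are built on static_cpu_has(), which is
 * patched at boot via the alternatives mechanism: once patching has
 * run, each check costs a straight-line jump rather than a runtime
 * conditional, which matters on the context-switch paths below.
 */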
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

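/*
 * With xsaveopt, state components that are in their init configuration
 * may not be written back, so the memory image can contain stale data
 * for them.  __sanitize_i387_state() rewrites those areas to defined
 * init values before the buffer is exposed (e.g. via ptrace or a
 * signal frame).
 */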
extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
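/*
 * fxrstor can fault on a corrupted frame.  The .fixup/_ASM_EXTABLE
 * pair below routes such a fault to the "movl $-1,%[err]" recovery
 * stub instead of oopsing, so callers see a nonzero return value and
 * can fall back to a sane state.
 */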
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fpu_fxsave() below. */
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
	return err;
}

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is
   pending.  Clear the x87 state here by setting it to fixed values.
   The kernel data segment can sometimes be 0 and sometimes the new
   user value.  Both should be OK.  Use the PDA as the safe address
   because it should already be in L1. */
static inline void fpu_clear(struct fpu *fpu)
{
	struct xsave_struct *xstate = &fpu->state->xsave;
	struct i387_fxsave_struct *fx = &fpu->state->fxsave;

	/*
	 * xsave header may indicate the init state of the FP.
	 */
	if (use_xsave() &&
	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
		return;

	if (unlikely(fx->swd & X87_FSW_ES))
		asm volatile("fnclex");
	alternative_input(ASM_NOP8 ASM_NOP2,
			  "emms\n\t"		/* clear stack tags */
			  "fildl %%gs:0",	/* load to clear state */
			  X86_FEATURE_FXSAVE_LEAK);
}

static inline void clear_fpu_state(struct task_struct *tsk)
{
	fpu_clear(&tsk->thread.fpu);
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fpu_fxsave() below. */
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost.
	   Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16.
	   asm volatile("fxsaveq %0"
			: "=m" (fpu->state->fxsave));
	   Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
	   asm volatile("rex64/fxsave %0"
			: "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers: the
	   "R" constraint below permits only the legacy (non-REX) integer
	   registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
}

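/*
 * Save the current FPU state into fpu->state, then neutralize the
 * register contents (see fpu_clear() above) so the FXSAVE leak
 * workaround is applied on every save.  Like the 32-bit variant
 * further down, this must run with preemption disabled.
 */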
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave())
		fpu_xsave(fpu);
	else
		fpu_fxsave(fpu);

	fpu_clear(fpu);
}

#else /* CONFIG_X86_32 */

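/*
 * alternative_input() patches the frstor-based sequence into fxrstor
 * at boot when X86_FEATURE_FXSR is present, so the context-switch
 * path pays no runtime branch for the feature check.
 */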
/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch.  The best choices are unfortunately
   different for UP and SMP. */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return;
	}

	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
		asm volatile("fnclex");

	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values.  safe_address is an arbitrary kernel address that should
	   be in L1. */
	alternative_input(
		GENERIC_NOP8 GENERIC_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
}

#endif /* CONFIG_X86_64 */

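/*
 * TS_USEDFPU in thread_info->status tracks whether the live CPU
 * registers currently hold this task's FPU state.  __save_init_fpu()
 * writes the state back to memory and drops the flag; this is the
 * core of the lazy-FPU scheme used throughout this header.
 */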
static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

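/*
 * Give up the FPU if this task owns the live register state: save it
 * back to memory and set CR0.TS so the next FPU use traps.  If the
 * task was not using the FPU, reset fpu_counter, the heuristic that
 * decides whether to eagerly preload the state at context switch.
 */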
static inline void __unlazy_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		__save_init_fpu(tsk);
		stts();
	} else
		tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}

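/*
 * kernel_fpu_begin()/kernel_fpu_end() bracket FPU/SIMD use in kernel
 * code.  A sketch of the expected pattern:
 *
 *	kernel_fpu_begin();
 *	... SSE/FPU instructions ...
 *	kernel_fpu_end();
 *
 * Preemption stays disabled in between, and callers running in
 * interrupt context should first check irq_fpu_usable() below.
 */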
static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();

	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}

static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}

static inline bool irq_fpu_usable(void)
{
	struct pt_regs *regs;

	return !in_interrupt() || !(regs = get_irq_regs()) ||
		user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers.  And these instructions get
 * used from interrupt context as well.  To prevent these kernel
 * instructions in interrupt context interacting wrongly with other
 * user/kernel fpu usage, we should use them only within
 * irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious
	 * DNA fault.  Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

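/*
 * Expected usage around a padlock-style instruction (sketch):
 *
 *	int ts = irq_ts_save();
 *	... instruction that may raise a spurious DNA fault ...
 *	irq_ts_restore(ts);
 */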
/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

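/*
 * fpu->state is allocated lazily from task_xstate_cachep.  fxsave and
 * xsave require a 16-byte-aligned buffer; the WARN_ON below catches a
 * misaligned allocation.
 */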
static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */