/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
                                xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
                                xstateregs_set;


/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES	(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
        if (!use_xsaveopt())
                return;
        __sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        int err;

        /* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
        asm volatile("1: fxrstorq %[fx]\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : [fx] "m" (*fx), "0" (0));
#else
        asm volatile("1: rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
        return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        int err;

        /*
         * Clear the bytes not touched by the fxsave and reserved
         * for the SW usage.
         */
        err = __clear_user(&fx->sw_reserved,
                           sizeof(struct _fpx_sw_bytes));
        if (unlikely(err))
                return -EFAULT;

        /* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
        asm volatile(ASM_STAC "\n"
                     "1: fxsaveq %[fx]\n\t"
                     "2: " ASM_CLAC "\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), [fx] "=m" (*fx)
                     : "0" (0));
#else
        asm volatile(ASM_STAC "\n"
                     "1: rex64/fxsave (%[fx])\n\t"
                     "2: " ASM_CLAC "\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
                     : [fx] "R" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (fpu->state->fxsave));
#else
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
           needed for addressing (fix submitted to mainline 2005-11-21).
        asm volatile("rex64/fxsave %0"
                     : "=m" (fpu->state->fxsave));
           This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
        asm volatile("rex64/fxsave (%[fx])"
                     : "=m" (fpu->state->fxsave)
                     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (*fx));

        return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        asm volatile("fxsave %[fx]"
                     : [fx] "=m" (fpu->state->fxsave));
}

#endif /* CONFIG_X86_64 */

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
        if (use_xsave()) {
                fpu_xsave(fpu);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
                        return 1;
        } else if (use_fxsr()) {
                fpu_fxsave(fpu);
        } else {
                asm volatile("fnsave %[fx]; fwait"
                             : [fx] "=m" (fpu->state->fsave));
                return 0;
        }

        /*
         * If exceptions are pending, we need to clear them so
         * that we don't randomly get exceptions later.
         *
         * FIXME! Is this perhaps only true for the old-style
         * irq13 case? Maybe we could leave the x87 state
         * intact otherwise?
         */
        if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
                asm volatile("fnclex");
                return 0;
        }
        return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
        return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
        return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
        if (use_xsave())
                return fpu_xrstor_checking(fpu);
        else
                return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending.  Clear the x87 state here by setting it to fixed
           values. "m" is a random variable that should be in L1 */
        alternative_input(
                ASM_NOP8 ASM_NOP2,
                "emms\n\t"		/* clear stack tags */
                "fildl %P[addr]",	/* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (tsk->thread.fpu.has_fpu));

        return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
        return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
        tsk->thread.fpu.has_fpu = 0;
        this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
        tsk->thread.fpu.has_fpu = 1;
        this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * do try to avoid using these on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
        __thread_clear_has_fpu(tsk);
        stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
        clts();
        __thread_set_has_fpu(tsk);
}
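
/*
 * Example (hedged): the "preemption protection" asked for above normally
 * looks like the snippet below (compare user_fpu_begin()/user_fpu_end()
 * further down); this is an illustrative sketch, not code copied from a
 * particular caller:
 *
 *	preempt_disable();
 *	__thread_fpu_begin(tsk);	// clts() + mark tsk as owning the FPU
 *	...use the FPU registers...
 *	__thread_fpu_end(tsk);		// drop ownership + stts()
 *	preempt_enable();
 */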

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Lazy restore: the per-cpu 'fpu_owner_task' variable tracks which task
 * last left its FPU state in this CPU's registers, and each task records
 * in thread.fpu.last_cpu which CPU that was.  If both still match, the
 * registers already hold 'new's state and the restore can be skipped.
 * (A task's last_cpu is set to ~0 whenever its saved state may no longer
 * match the registers.)
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
        return new == this_cpu_read_stable(fpu_owner_task) &&
                cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
        fpu_switch_t fpu;

        fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
        if (__thread_has_fpu(old)) {
                if (!__save_init_fpu(old))
                        cpu = ~0;
                old->thread.fpu.last_cpu = cpu;
                old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
                        new->fpu_counter++;
                        __thread_set_has_fpu(new);
                        prefetch(new->thread.fpu.state);
                } else
                        stts();
        } else {
                old->fpu_counter = 0;
                old->thread.fpu.last_cpu = ~0;
                if (fpu.preload) {
                        new->fpu_counter++;
                        if (fpu_lazy_restore(new, cpu))
                                fpu.preload = 0;
                        else
                                prefetch(new->thread.fpu.state);
                        __thread_fpu_begin(new);
                }
        }
        return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
        if (fpu.preload) {
                if (unlikely(restore_fpu_checking(new)))
                        __thread_fpu_end(new);
        }
}
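
/*
 * Usage sketch (hedged): the two halves are meant to bracket the actual
 * context switch.  Assuming a __switch_to()-style caller (the surrounding
 * names are illustrative, not a quote of process_32.c/process_64.c), the
 * scheduler side looks roughly like:
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...switch stacks, segments, TLS, etc...
 *	switch_fpu_finish(next_p, fpu);
 *
 * switch_fpu_prepare() runs while 'prev_p' is still current and decides
 * whether 'next_p' will have its state preloaded; switch_fpu_finish()
 * then performs the register restore only if that decision was made.
 */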

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (__thread_has_fpu(tsk)) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                __thread_fpu_end(tsk);
        }
}

/*
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline void user_fpu_end(void)
{
        preempt_disable();
        __thread_fpu_end(current);
        preempt_enable();
}

static inline void user_fpu_begin(void)
{
        preempt_disable();
        if (!user_has_fpu())
                __thread_fpu_begin(current);
        preempt_enable();
}
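
/*
 * Example (hedged): the NOTE above translates into callers shaped roughly
 * like the snippets below.  The copy steps are placeholders, not code
 * lifted from the signal-frame handlers themselves:
 *
 *	// saving the FPU state to a user-space signal frame
 *	__save_init_fpu(tsk);			// dump registers to memory
 *	...copy tsk->thread.fpu.state out to the frame...
 *	user_fpu_end();				// only *after* the save
 *
 *	// restoring the FPU state from a signal frame
 *	user_fpu_begin();			// only *immediately before*
 *	...copy the frame into tsk->thread.fpu.state...
 *	restore_fpu_checking(tsk);
 */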

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        WARN_ON_ONCE(!__thread_has_fpu(tsk));
        preempt_disable();
        __save_init_fpu(tsk);
        __thread_fpu_end(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.fpu.state->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}
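
/*
 * Example (hedged): a math-error (#MF) handler can combine the control
 * and status words to find the unmasked x87 exception that fired.  The
 * idiom below is a sketch of that decoding, not a quote of traps.c:
 *
 *	unsigned short cwd = get_fpu_cwd(task);
 *	unsigned short swd = get_fpu_swd(task);
 *	unsigned short err = swd & ~cwd & 0x3f;	// unmasked exception bits
 *
 * In 'err', bit 0 is Invalid Operation, bit 2 Divide-by-zero, bit 3
 * Overflow, bit 4 Underflow and bit 5 Precision, following the x87
 * status-word layout.
 */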

static bool fpu_allocated(struct fpu *fpu)
{
        return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
        if (fpu_allocated(fpu))
                return 0;
        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
        if (!fpu->state)
                return -ENOMEM;
        WARN_ON((unsigned long)fpu->state & 15);
        return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
        if (fpu->state) {
                kmem_cache_free(task_xstate_cachep, fpu->state);
                fpu->state = NULL;
        }
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
        memcpy(dst->state, src->state, xstate_size);
}
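
/*
 * Usage sketch (hedged): fork is the typical consumer of fpu_alloc() and
 * fpu_copy().  Assuming an arch_dup_task_struct()-style caller (the name
 * is illustrative), duplicating a parent's FPU state looks roughly like:
 *
 *	int err = fpu_alloc(&dst->thread.fpu);
 *	if (err)
 *		return err;		// -ENOMEM propagates back to fork()
 *	fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 *
 * fpu_copy() assumes both 'state' buffers are already allocated and just
 * memcpy()s xstate_size bytes, so the fpu_alloc() must happen first.
 */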

extern void fpu_finit(struct fpu *fpu);

#endif