x86/fpu: Move debugging check from kernel_fpu_begin() to __kernel_fpu_begin()
arch/x86/kernel/fpu/core.c
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case, when we return true: in that (likely)
 * case the thread has the FPU but we are not going to set/clear TS
 * anyway.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_ONCE(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
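
/*
 * Usage sketch (added; not in the original file): a typical in-kernel
 * SIMD user brackets its FPU-touching code like this, where
 * my_simd_op()/my_scalar_op() are hypothetical helpers:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		my_simd_op(data, len);
 *		kernel_fpu_end();
 *	} else {
 *		my_scalar_op(data, len);
 *	}
 *
 * kernel_fpu_begin() disables preemption, so the bracketed code
 * must not sleep.
 */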

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
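
/*
 * Usage sketch (added; not in the original file): a driver that runs
 * instructions which #NM-fault when CR0::TS is set, but does not
 * modify FPU register state, can use this pair even from atomic
 * context -- my_ts_sensitive_op() is a hypothetical helper:
 *
 *	int ts_state = irq_ts_save();
 *
 *	my_ts_sensitive_op();
 *	irq_ts_restore(ts_state);
 */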

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
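	/*
	 * Note (added): these are the x87 FINIT defaults in FSAVE layout:
	 * cwd 0x037f = all exceptions masked, extended precision,
	 * round-to-nearest; swd 0x0000 = no exceptions pending; twd all
	 * ones = every register tagged empty.  Setting the upper 16 bits
	 * to 0xffff apparently mirrors the all-ones reserved high bits
	 * of these fields in the legacy FSAVE image (an assumption, not
	 * stated in the original file).
	 */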
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!cpu_has_fpu) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In both the 'eager' and the 'lazy' case we save hardware registers
 * directly to the destination buffer.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(&child_fpu->state);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
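
/*
 * Call-site sketch (added; not in the original file): the expected
 * caller is the #NM device-not-available trap handler, roughly:
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		...
 *		fpu__restore();		[ interrupts stay disabled ]
 *	}
 */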

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) masks out the exceptions that the task has
		 * masked in the control word. 0x3f covers the exception
		 * bits in these regs, 0x200 is the C1 flag you need to
		 * tell stack underflow from overflow, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a
		 * time, so if this combination doesn't produce any single
		 * exception, then we have a bad program that isn't
		 * synchronizing its FPU usage and it will suffer the
		 * consequences, since we won't be able to fully reproduce
		 * the context of the exception.
		 */
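		/*
		 * Worked example (added; values are hypothetical): with only
		 * the invalid-op exception unmasked, cwd = 0x037e (IM, bit 0,
		 * clear); an FDIV on an empty stack then sets IE and SF in
		 * swd, e.g. swd = 0x0041.  swd & ~cwd leaves only bit 0 set,
		 * so we return FPE_FLTINV below.
		 */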
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
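		/*
		 * Worked example (added; values are hypothetical): with only
		 * divide-by-zero unmasked (ZM, bit 9, clear) and its flag ZE
		 * (bit 2) set, mxcsr = 0x1d84.  mxcsr >> 7 aligns the mask
		 * bits with the flag bits, so ~(mxcsr >> 7) & mxcsr keeps
		 * bit 2 in the low six bits and we return FPE_FLTDIV below.
		 */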
		unsigned short mxcsr = get_fpu_mxcsr(fpu);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}