x86/fpu: Optimize fpu_copy() some more on lazy switching systems
arch/x86/kernel/fpu/core.c
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <linux/hardirq.h>

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * In the lazy FPU case we can do a kernel_fpu_begin/end() pair *ONLY*
 * if that pair does nothing at all: the thread must not have the FPU
 * registers active (so that we don't try to save its FPU state), and
 * TS must be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case, when we return true: in the likely case
 * the thread has FPU registers active, but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(restore_fpu_checking(fpu)))
			fpu_reset_state(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
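
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * code that wants to use SSE/AVX instructions in the kernel brackets
 * them with kernel_fpu_begin()/kernel_fpu_end(), and, if it can run in
 * IRQ context, checks irq_fpu_usable() first and falls back to a
 * scalar path otherwise:
 *
 *	static void checksum_block(void *buf, int len)
 *	{
 *		if (!irq_fpu_usable()) {
 *			checksum_block_scalar(buf, len);	// hypothetical fallback
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		checksum_block_simd(buf, len);			// hypothetical SIMD body
 *		kernel_fpu_end();
 *	}
 *
 * Sleeping or scheduling between begin/end is not allowed: preemption
 * stays disabled for the whole critical section.
 */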

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
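
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * irq_ts_save()/irq_ts_restore() let atomic code that executes
 * TS-sensitive instructions without clobbering any FPU registers
 * temporarily clear CR0::TS so those instructions don't fault, and
 * then put TS back the way it was found:
 *
 *	int ts;
 *
 *	ts = irq_ts_save();
 *	do_ts_sensitive_op();		// hypothetical, touches no FPU regs
 *	irq_ts_restore(ts);
 */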

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;

		/* FNSAVE-format FINIT defaults; unused high 16 bits read as ones: */
		fp->cwd = 0xffff037fu;	/* all exceptions masked, 64-bit precision */
		fp->swd = 0xffff0000u;	/* status word cleared */
		fp->twd = 0xffffffffu;	/* all register tags: empty */
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we also save straight to the destination context;
 * if FNSAVE destroyed the live registers in the process, the saved state
 * is copied back into the source context and the source task is marked
 * for lazy restore (TS gets set when its registers are deactivated).
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
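
/*
 * Context sketch (an editorial assumption about the caller, not part of
 * this file): fpu__copy() runs on the fork()/clone() path, roughly:
 *
 *	int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 *	{
 *		*dst = *src;
 *		return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 *
 * which is why 'src_fpu' is always the current task's FPU context,
 * as the WARN_ON() above checks.
 */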

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(fpu);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(child_fpu);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' copies the FPU registers from the fpstate to the
 * live hardware registers and enables access to them, so that direct
 * FPU operations succeed from then on. The FPU context is initialized
 * first if the task has none yet.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(restore_fpu_checking(fpu))) {
		fpu_reset_state(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

void fpu__clear(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON_ONCE(tsk != current); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by software first into the xstate
	 * memory layout in the thread struct, so that we can copy the
	 * entire xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
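
/*
 * A worked example of the bit-packing above (editorial illustration):
 * the i387 tag word uses two bits per register (11 = empty, anything
 * else = in use), while the FXSR tag byte uses one bit per register
 * (1 = in use). For twd = 0xfffe (only ST(0) in use, tag 10):
 *
 *	~twd				= 0x0001
 *	(tmp | (tmp >> 1)) & 0x5555	= 0x0001   (pair 0 marked non-empty)
 *	...folded down to the low byte	= 0x0001
 *
 * so the function returns 0x01: only bit 0, for ST(0), is set.
 */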

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be the ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */