x86/fpu: Split out fpu/signal.h from fpu/internal.h for signal frame handling functions
arch/x86/kernel/fpu/core.c
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>

#include <linux/hardirq.h>

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
        WARN_ON(this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
        WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
        return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * In the lazy-FPU case we can do a kernel_fpu_begin/end() pair *ONLY* if
 * that pair does nothing at all: the interrupted thread must not have the
 * FPU registers active (so that we don't try to save its FPU state), and
 * CR0::TS must be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * The exception is the eagerfpu case, where we always return true: in the
 * likely case the thread does have active FPU state, but we are not going
 * to set/clear TS anyway.
 */
static bool interrupted_kernel_fpu_idle(void)
{
        if (kernel_fpu_disabled())
                return false;

        if (use_eager_fpu())
                return true;

        return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
        struct pt_regs *regs = get_irq_regs();
        return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
        return !in_interrupt() ||
                interrupted_user_mode() ||
                interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        kernel_fpu_disable();

        if (fpu->fpregs_active) {
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
                __fpregs_activate_hw();
        }
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
        struct fpu *fpu = &current->thread.fpu;

        if (fpu->fpregs_active) {
                if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
                        fpu__clear(fpu);
        } else {
                __fpregs_deactivate_hw();
        }

        kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
        preempt_disable();
        WARN_ON_ONCE(!irq_fpu_usable());
        __kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
        __kernel_fpu_end();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
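
/*
 * Illustrative usage sketch (not part of the original file):
 *
 * A driver that wants to use SIMD instructions in kernel code brackets the
 * FPU-using region with the pair above and falls back to an integer path
 * when the FPU is not usable.  kernel_fpu_begin() disables preemption and
 * saves the current task's FPU registers; kernel_fpu_end() restores them
 * and re-enables preemption.  Only irq_fpu_usable(), kernel_fpu_begin()
 * and kernel_fpu_end() below are real - the example_*() helpers are made
 * up for the sketch:
 *
 *	static void example_xor_blocks(u8 *dst, const u8 *src, size_t len)
 *	{
 *		if (!irq_fpu_usable()) {
 *			example_xor_blocks_integer(dst, src, len);
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		example_xor_blocks_sse(dst, src, len);
 *		kernel_fpu_end();
 *	}
 */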

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
        /*
         * If we are in process context (and not atomic), a spurious DNA
         * fault can simply be taken and handled, so don't touch CR0::TS
         * here.  Doing clts() in process context would require disabling
         * preemption or some heavy lifting like kernel_fpu_begin().
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
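
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs to execute a single FPU-touching instruction from atomic context,
 * without going through kernel_fpu_begin()/end(), saves and restores the
 * CR0::TS state around it.  The helper name is made up for the example;
 * only irq_ts_save()/irq_ts_restore() are real:
 *
 *	int ts_state;
 *
 *	ts_state = irq_ts_save();
 *	example_issue_fpu_touching_insn(out);
 *	irq_ts_restore(ts_state);
 */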

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
        WARN_ON(fpu != &current->thread.fpu);

        preempt_disable();
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu))
                        fpregs_deactivate(fpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
        if (!cpu_has_fpu) {
                finit_soft_fpu(&fpu->state.soft);
                return;
        }

        memset(&fpu->state, 0, xstate_size);

        if (cpu_has_fxsr) {
                fx_finit(&fpu->state.fxsave);
        } else {
                struct i387_fsave_struct *fp = &fpu->state.fsave;
                fp->cwd = 0xffff037fu;	/* default control word: all exceptions masked, extended precision, round-to-nearest */
                fp->swd = 0xffff0000u;	/* status word clear */
                fp->twd = 0xffffffffu;	/* tag word: all registers empty */
                fp->fos = 0xffff0000u;	/* last operand selector clear */
        }
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In both the 'eager' and the 'lazy' case we save the live registers
 * directly into the destination context.
 *
 * If the save destroyed the register contents (the FNSAVE case on old
 * CPUs), the saved image is copied back into the source context as well
 * and the source task is marked for a lazy restore.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        WARN_ON(src_fpu != &current->thread.fpu);

        /*
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
        if (use_eager_fpu())
                memset(&dst_fpu->state.xsave, 0, xstate_size);

        /*
         * Save current FPU registers directly into the child
         * FPU context, without any memory-to-memory copying.
         *
         * If the FPU context got destroyed in the process (FNSAVE
         * done on old CPUs) then copy it back into the source
         * context and mark the current task for lazy restore.
         *
         * We have to do all this with preemption disabled,
         * mostly because of the FNSAVE case, because in that
         * case we must not allow preemption in the window
         * between the FNSAVE and us marking the context lazy.
         *
         * It shouldn't be an issue as even FNSAVE is plenty
         * fast in terms of critical section length.
         */
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
                fpregs_deactivate(src_fpu);
        }
        preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;

        if (src_fpu->fpstate_active)
                fpu_copy(dst_fpu, src_fpu);

        return 0;
}
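
/*
 * Rough call-path sketch (not part of the original file, and from memory
 * rather than this exact tree): fpu__copy() is reached on the fork()/clone()
 * path when the new task's task_struct is duplicated, roughly:
 *
 *	copy_process()
 *	  -> arch_dup_task_struct(dst, src)
 *	       -> fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 */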

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
        WARN_ON_ONCE(fpu != &current->thread.fpu);

        if (!fpu->fpstate_active) {
                fpstate_init(fpu);

                /* Safe to do for the current task: */
                fpu->fpstate_active = 1;
        }
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case; it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
        WARN_ON_ONCE(child_fpu == &current->thread.fpu);

        if (child_fpu->fpstate_active) {
                child_fpu->last_cpu = -1;
        } else {
                fpstate_init(child_fpu);

                /* Safe to do for stopped child tasks: */
                child_fpu->fpstate_active = 1;
        }
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
        struct task_struct *tsk = current;
        struct fpu *fpu = &tsk->thread.fpu;

        fpu__activate_curr(fpu);

        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
        fpregs_activate(fpu);
        if (unlikely(copy_fpstate_to_fpregs(fpu))) {
                fpu__clear(fpu);
                force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
        } else {
                tsk->thread.fpu.counter++;
        }
        kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
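
/*
 * Rough caller sketch (not part of the original file, and approximate):
 * in the lazy-FPU model the first FPU instruction of a task raises a
 * device-not-available (#NM) exception because CR0::TS is set, and the
 * trap handler hands off to fpu__restore() to load this task's fpstate
 * into the registers, roughly:
 *
 *	do_device_not_available()		(arch/x86/kernel/traps.c)
 *	  -> fpu__restore();
 */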

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
        preempt_disable();
        fpu->counter = 0;

        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                fpregs_deactivate(fpu);
        }

        fpu->fpstate_active = 0;

        preempt_enable();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
        WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

        if (!use_eager_fpu()) {
                /* FPU state will be reinitialized lazily at the first FPU use: */
                fpu__drop(fpu);
        } else {
                if (!fpu->fpstate_active) {
                        fpu__activate_curr(fpu);
                        user_fpu_begin();
                }
                restore_init_xstate();
        }
}

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        struct fpu *target_fpu = &target->thread.fpu;

        return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        struct fpu *target_fpu = &target->thread.fpu;

        return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;

        if (!cpu_has_fxsr)
                return -ENODEV;

        fpu__activate_stopped(fpu);
        fpstate_sanitize_xstate(fpu);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        int ret;

        if (!cpu_has_fxsr)
                return -ENODEV;

        fpu__activate_stopped(fpu);
        fpstate_sanitize_xstate(fpu);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &fpu->state.fxsave, 0, -1);

        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

        /*
         * update the header bits in the xsave header, indicating the
         * presence of FP and SSE state.
         */
        if (cpu_has_xsave)
                fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

        return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct xsave_struct *xsave;
        int ret;

        if (!cpu_has_xsave)
                return -ENODEV;

        fpu__activate_stopped(fpu);

        xsave = &fpu->state.xsave;

        /*
         * Copy the 48 bytes defined by the software first into the xstate
         * memory layout in the thread struct, so that we can copy the entire
         * xstateregs to the user using one user_regset_copyout().
         */
        memcpy(&xsave->i387.sw_reserved,
                xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
        /*
         * Copy the xstate memory layout.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
        return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct xsave_struct *xsave;
        int ret;

        if (!cpu_has_xsave)
                return -ENODEV;

        fpu__activate_stopped(fpu);

        xsave = &fpu->state.xsave;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        xsave->i387.mxcsr &= mxcsr_feature_mask;
        xsave->header.xfeatures &= xfeatures_mask;
        /*
         * These bits must be zero.
         */
        memset(&xsave->header.reserved, 0, 48);

        return ret;
}
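
/*
 * Rough wiring sketch (not part of the original file, and approximate):
 * the regset handlers in this file are reached through the x86 user_regset
 * tables in arch/x86/kernel/ptrace.c, so that a debugger doing
 * ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) ends up in
 * xstateregs_get() above, and PTRACE_SETREGSET in xstateregs_set(),
 * roughly:
 *
 *	[REGSET_XSTATE] = {
 *		.core_note_type	= NT_X86_XSTATE,
 *		.get		= xstateregs_get,
 *		.set		= xstateregs_set,
 *		...
 *	},
 */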

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

        return tmp;
}
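
/*
 * Worked example (added for illustration, not part of the original file):
 * the i387 tag word uses two bits per register (00 valid, 01 zero,
 * 10 special, 11 empty), while FXSR keeps one bit per register
 * (1 = not empty).  For twd = 0x3ffe (register 0 "special", register 7
 * "valid", all others "empty"), tracking only the low 16 bits that
 * survive the masking:
 *
 *	~twd                     = 0xc001
 *	(tmp | tmp>>1) & 0x5555  = 0x4001	one flag bit per 2-bit pair
 *	(tmp | tmp>>1) & 0x3333  = 0x2001
 *	(tmp | tmp>>2) & 0x0f0f  = 0x0801
 *	(tmp | tmp>>4) & 0x00ff  = 0x0081	registers 0 and 7 marked in use
 */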

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
        struct _fpxreg *st;
        u32 tos = (fxsave->swd >> 11) & 7;
        u32 twd = (unsigned long) fxsave->twd;
        u32 tag;
        u32 ret = 0xffff0000u;
        int i;

        for (i = 0; i < 8; i++, twd >>= 1) {
                if (twd & 0x1) {
                        st = FPREG_ADDR(fxsave, (i - tos) & 7);

                        switch (st->exponent & 0x7fff) {
                        case 0x7fff:
                                tag = FP_EXP_TAG_SPECIAL;
                                break;
                        case 0x0000:
                                if (!st->significand[0] &&
                                    !st->significand[1] &&
                                    !st->significand[2] &&
                                    !st->significand[3])
                                        tag = FP_EXP_TAG_ZERO;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        default:
                                if (st->significand[3] & 0x8000)
                                        tag = FP_EXP_TAG_VALID;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        }
                } else {
                        tag = FP_EXP_TAG_EMPTY;
                }
                ret |= tag << (2 * i);
        }
        return ret;
}
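
/*
 * Worked example (added for illustration, not part of the original file):
 * the FXSR tag word only records "in use" per register, so the 2-bit i387
 * tags are reconstructed by inspecting the register contents.  Assuming
 * TOP = 0 and fxsave->twd = 0x80 (only physical register 7 in use), with
 * that register holding a normalized value (integer bit set, exponent
 * neither 0 nor 0x7fff), register 7 gets tag 00 (valid) and the rest
 * 11 (empty):
 *
 *	ret = 0xffff0000 | 0x3fff = 0xffff3fff
 */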

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
        struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        env->cwd = fxsave->cwd | 0xffff0000u;
        env->swd = fxsave->swd | 0xffff0000u;
        env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
        env->fip = fxsave->rip;
        env->foo = fxsave->rdp;
        /*
         * These should actually be ds/cs at the time of the FPU exception,
         * but that information is not available in 64-bit mode.
         */
        env->fcs = task_pt_regs(tsk)->cs;
        if (tsk == current) {
                savesegment(ds, env->fos);
        } else {
                env->fos = tsk->thread.ds;
        }
        env->fos |= 0xffff0000;
#else
        env->fip = fxsave->fip;
        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
        env->foo = fxsave->foo;
        env->fos = fxsave->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
                     const struct user_i387_ia32_struct *env)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
        struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        fxsave->cwd = env->cwd;
        fxsave->swd = env->swd;
        fxsave->twd = twd_i387_to_fxsr(env->twd);
        fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
        fxsave->rip = env->fip;
        fxsave->rdp = env->foo;
        /* cs and ds ignored */
#else
        fxsave->fip = env->fip;
        fxsave->fcs = (env->fcs & 0xffff);
        fxsave->foo = env->foo;
        fxsave->fos = env->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               void *kbuf, void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct user_i387_ia32_struct env;

        fpu__activate_stopped(fpu);

        if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr)
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &fpu->state.fsave, 0,
                                           -1);

        fpstate_sanitize_xstate(fpu);

        if (kbuf && pos == 0 && count == sizeof(env)) {
                convert_from_fxsr(kbuf, target);
                return 0;
        }

        convert_from_fxsr(&env, target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct user_i387_ia32_struct env;
        int ret;

        fpu__activate_stopped(fpu);
        fpstate_sanitize_xstate(fpu);

        if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr)
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &fpu->state.fsave, 0,
                                          -1);

        if (pos > 0 || count < sizeof(env))
                convert_from_fxsr(&env, target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
        if (!ret)
                convert_to_fxsr(target, &env);

        /*
         * update the header bit in the xsave header, indicating the
         * presence of FP.
         */
        if (cpu_has_xsave)
                fpu->state.xsave.header.xfeatures |= XSTATE_FP;
        return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
        struct task_struct *tsk = current;
        struct fpu *fpu = &tsk->thread.fpu;
        int fpvalid;

        fpvalid = fpu->fpstate_active;
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
                                      ufpu, NULL);

        return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */