x86/fpu: Rename fpu-internal.h to fpu/internal.h
author		Ingo Molnar <mingo@kernel.org>
		Fri, 24 Apr 2015 00:54:44 +0000 (02:54 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Tue, 19 May 2015 13:47:31 +0000 (15:47 +0200)
This unifies all the FPU related header files under a hierarchical
naming scheme:

 - asm/fpu/types.h:      FPU related data types, needed for 'struct task_struct',
                         widely included in almost all kernel code, and hence kept
                         as small as possible.

 - asm/fpu/api.h:        FPU related 'public' methods exported to other subsystems.

 - asm/fpu/internal.h:   FPU subsystem internal methods

 - asm/fpu/xsave.h:      XSAVE support internal methods

(Also standardize the header guard in asm/fpu/internal.h.)
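
As an illustration of the intended layering: code outside the FPU
subsystem should only ever need the 'public' header. A minimal sketch,
assuming the kernel_fpu_begin()/kernel_fpu_end() API exported via
asm/fpu/api.h (the function below is a made-up example, not part of
this patch):

  #include <asm/fpu/api.h>

  static void my_simd_work(void)
  {
          /* Take ownership of the FPU and disable preemption: */
          kernel_fpu_begin();

          /* ... SSE/AVX instructions can be used safely here ... */

          /* Release the FPU again: */
          kernel_fpu_end();
  }

The call sites converted below continue to include the internal header
for now; only the include path changes.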

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
19 files changed:
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/fpu-internal.h [deleted file]
arch/x86/include/asm/fpu/internal.h [new file with mode: 0644]
arch/x86/kernel/cpu/common.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/xsave.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
arch/x86/power/cpu.c

diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 470522cb042a85b98d6acab7908f0797d4108220..81a595d75cf5959bbcae8c2096ebdf0f538bf7f9 100644
@@ -32,7 +32,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 
 #define CHKSUM_BLOCK_SIZE      1
 #define CHKSUM_DIGEST_SIZE     4
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 02b64bbc1d488f293b5d19e3bc13e11acd59a8de..03ffaf8c22442d5c7d94ea58c39aaae1854fe2de 100644
@@ -68,7 +68,7 @@
 #include <asm/xcr.h>
 #include <asm/fpu/xsave.h>
 #include <linux/hardirq.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include "sha_mb_ctx.h"
 
 #define FLUSH_INTERVAL 1000 /* in usec */
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index e1ec6f90d09e2d3915b187a03944e428f5b6e15b..d6d8f4ca5136ed6a4c72c637de7491b8f62824d0 100644
@@ -21,7 +21,7 @@
 #include <linux/binfmts.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/ptrace.h>
 #include <asm/ia32_unistd.h>
 #include <asm/user32.h>
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
deleted file mode 100644
index 20690a1..0000000
--- a/arch/x86/include/asm/fpu-internal.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- * x86-64 work by Andi Kleen 2002
- */
-
-#ifndef _FPU_INTERNAL_H
-#define _FPU_INTERNAL_H
-
-#include <linux/regset.h>
-#include <linux/compat.h>
-#include <linux/slab.h>
-
-#include <asm/user.h>
-#include <asm/fpu/api.h>
-#include <asm/fpu/xsave.h>
-
-#ifdef CONFIG_X86_64
-# include <asm/sigcontext32.h>
-# include <asm/user32.h>
-struct ksignal;
-int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
-                       compat_sigset_t *set, struct pt_regs *regs);
-int ia32_setup_frame(int sig, struct ksignal *ksig,
-                    compat_sigset_t *set, struct pt_regs *regs);
-#else
-# define user_i387_ia32_struct user_i387_struct
-# define user32_fxsr_struct    user_fxsr_struct
-# define ia32_setup_frame      __setup_frame
-# define ia32_setup_rt_frame   __setup_rt_frame
-#endif
-
-extern unsigned int mxcsr_feature_mask;
-extern void fpu__cpu_init(void);
-extern void eager_fpu_init(void);
-
-DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
-
-extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
-                             struct task_struct *tsk);
-extern void convert_to_fxsr(struct task_struct *tsk,
-                           const struct user_i387_ia32_struct *env);
-
-extern user_regset_active_fn fpregs_active, xfpregs_active;
-extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
-                               xstateregs_get;
-extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
-                                xstateregs_set;
-
-/*
- * xstateregs_active == fpregs_active. Please refer to the comment
- * at the definition of fpregs_active.
- */
-#define xstateregs_active      fpregs_active
-
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-/*
- * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
- * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, its state will still be saved.
- */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
-{
-       per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
-}
-
-static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
-{
-       return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
-}
-
-static inline int is_ia32_compat_frame(void)
-{
-       return config_enabled(CONFIG_IA32_EMULATION) &&
-              test_thread_flag(TIF_IA32);
-}
-
-static inline int is_ia32_frame(void)
-{
-       return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
-}
-
-static inline int is_x32_frame(void)
-{
-       return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
-}
-
-#define X87_FSW_ES (1 << 7)    /* Exception Summary */
-
-static __always_inline __pure bool use_eager_fpu(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
-}
-
-static __always_inline __pure bool use_xsaveopt(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
-}
-
-static __always_inline __pure bool use_xsave(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_XSAVE);
-}
-
-static __always_inline __pure bool use_fxsr(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_FXSR);
-}
-
-static inline void fx_finit(struct i387_fxsave_struct *fx)
-{
-       fx->cwd = 0x37f;
-       fx->mxcsr = MXCSR_DEFAULT;
-}
-
-extern void __sanitize_i387_state(struct task_struct *);
-
-static inline void sanitize_i387_state(struct task_struct *tsk)
-{
-       if (!use_xsaveopt())
-               return;
-       __sanitize_i387_state(tsk);
-}
-
-#define user_insn(insn, output, input...)                              \
-({                                                                     \
-       int err;                                                        \
-       asm volatile(ASM_STAC "\n"                                      \
-                    "1:" #insn "\n\t"                                  \
-                    "2: " ASM_CLAC "\n"                                \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
-
-#define check_insn(insn, output, input...)                             \
-({                                                                     \
-       int err;                                                        \
-       asm volatile("1:" #insn "\n\t"                                  \
-                    "2:\n"                                             \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
-
-static inline int fsave_user(struct i387_fsave_struct __user *fx)
-{
-       return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
-}
-
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
-}
-
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
-                         "m" (*fx));
-}
-
-static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
-                         "m" (*fx));
-}
-
-static inline int frstor_checking(struct i387_fsave_struct *fx)
-{
-       return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-}
-
-static inline int frstor_user(struct i387_fsave_struct __user *fx)
-{
-       return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-       if (config_enabled(CONFIG_X86_32))
-               asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
-       else {
-               /* Using "rex64; fxsave %0" is broken because, if the memory
-                * operand uses any extended registers for addressing, a second
-                * REX prefix will be generated (to the assembler, rex64
-                * followed by semicolon is a separate instruction), and hence
-                * the 64-bitness is lost.
-                *
-                * Using "fxsaveq %0" would be the ideal choice, but is only
-                * supported starting with gas 2.16.
-                *
-                * Using, as a workaround, the properly prefixed form below
-                * isn't accepted by any binutils version so far released,
-                * complaining that the same type of prefix is used twice if
-                * an extended register is needed for addressing (fix submitted
-                * to mainline 2005-11-21).
-                *
-                *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
-                *
-                * This, however, we can work around by forcing the compiler to
-                * select an addressing mode that doesn't require extended
-                * registers.
-                */
-               asm volatile( "rex64/fxsave (%[fx])"
-                            : "=m" (fpu->state->fxsave)
-                            : [fx] "R" (&fpu->state->fxsave));
-       }
-}
-
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact.
- */
-static inline int fpu_save_init(struct fpu *fpu)
-{
-       if (use_xsave()) {
-               xsave_state(&fpu->state->xsave);
-
-               /*
-                * xsave header may indicate the init state of the FP.
-                */
-               if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-                       return 1;
-       } else if (use_fxsr()) {
-               fpu_fxsave(fpu);
-       } else {
-               asm volatile("fnsave %[fx]; fwait"
-                            : [fx] "=m" (fpu->state->fsave));
-               return 0;
-       }
-
-       /*
-        * If exceptions are pending, we need to clear them so
-        * that we don't randomly get exceptions later.
-        *
-        * FIXME! Is this perhaps only true for the old-style
-        * irq13 case? Maybe we could leave the x87 state
-        * intact otherwise?
-        */
-       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
-               asm volatile("fnclex");
-               return 0;
-       }
-       return 1;
-}
-
-static inline int fpu_restore_checking(struct fpu *fpu)
-{
-       if (use_xsave())
-               return fpu_xrstor_checking(&fpu->state->xsave);
-       else if (use_fxsr())
-               return fxrstor_checking(&fpu->state->fxsave);
-       else
-               return frstor_checking(&fpu->state->fsave);
-}
-
-static inline int restore_fpu_checking(struct fpu *fpu)
-{
-       /*
-        * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
-        * pending. Clear the x87 state here by setting it to fixed values.
-        * "m" is a random variable that should be in L1.
-        */
-       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
-               asm volatile(
-                       "fnclex\n\t"
-                       "emms\n\t"
-                       "fildl %P[addr]"        /* set F?P to defined value */
-                       : : [addr] "m" (fpu->has_fpu));
-       }
-
-       return fpu_restore_checking(fpu);
-}
-
-/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct fpu *fpu)
-{
-       fpu->has_fpu = 0;
-       this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-}
-
-/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct fpu *fpu)
-{
-       fpu->has_fpu = 1;
-       this_cpu_write(fpu_fpregs_owner_ctx, fpu);
-}
-
-/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void __thread_fpu_end(struct fpu *fpu)
-{
-       __thread_clear_has_fpu(fpu);
-       if (!use_eager_fpu())
-               stts();
-}
-
-static inline void __thread_fpu_begin(struct fpu *fpu)
-{
-       if (!use_eager_fpu())
-               clts();
-       __thread_set_has_fpu(fpu);
-}
-
-static inline void drop_fpu(struct fpu *fpu)
-{
-       /*
-        * Forget coprocessor state..
-        */
-       preempt_disable();
-       fpu->counter = 0;
-
-       if (fpu->has_fpu) {
-               /* Ignore delayed exceptions from user space */
-               asm volatile("1: fwait\n"
-                            "2:\n"
-                            _ASM_EXTABLE(1b, 2b));
-               __thread_fpu_end(fpu);
-       }
-
-       fpu->fpstate_active = 0;
-
-       preempt_enable();
-}
-
-static inline void restore_init_xstate(void)
-{
-       if (use_xsave())
-               xrstor_state(init_xstate_buf, -1);
-       else
-               fxrstor_checking(&init_xstate_buf->i387);
-}
-
-/*
- * Reset the FPU state in the eager case and drop it in the lazy case (later use
- * will reinit it).
- */
-static inline void fpu_reset_state(struct fpu *fpu)
-{
-       if (!use_eager_fpu())
-               drop_fpu(fpu);
-       else
-               restore_init_xstate();
-}
-
-/*
- * FPU state switching for scheduling.
- *
- * This is a two-stage process:
- *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
- *
- *  - switch_fpu_finish() restores the new state as
- *    necessary.
- */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
-{
-       fpu_switch_t fpu;
-
-       /*
-        * If the task has used the math, pre-load the FPU on xsave processors
-        * or if the past 5 consecutive context-switches used math.
-        */
-       fpu.preload = new_fpu->fpstate_active &&
-                     (use_eager_fpu() || new_fpu->counter > 5);
-
-       if (old_fpu->has_fpu) {
-               if (!fpu_save_init(old_fpu))
-                       old_fpu->last_cpu = -1;
-               else
-                       old_fpu->last_cpu = cpu;
-
-               /* But leave fpu_fpregs_owner_ctx! */
-               old_fpu->has_fpu = 0;
-
-               /* Don't change CR0.TS if we just switch! */
-               if (fpu.preload) {
-                       new_fpu->counter++;
-                       __thread_set_has_fpu(new_fpu);
-                       prefetch(new_fpu->state);
-               } else if (!use_eager_fpu())
-                       stts();
-       } else {
-               old_fpu->counter = 0;
-               old_fpu->last_cpu = -1;
-               if (fpu.preload) {
-                       new_fpu->counter++;
-                       if (fpu_want_lazy_restore(new_fpu, cpu))
-                               fpu.preload = 0;
-                       else
-                               prefetch(new_fpu->state);
-                       __thread_fpu_begin(new_fpu);
-               }
-       }
-       return fpu;
-}
-
-/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
- */
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
-{
-       if (fpu_switch.preload) {
-               if (unlikely(restore_fpu_checking(new_fpu)))
-                       fpu_reset_state(new_fpu);
-       }
-}
-
-/*
- * Signal frame handlers...
- */
-extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
-extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
-
-static inline int xstate_sigframe_size(void)
-{
-       return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
-}
-
-static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
-{
-       void __user *buf_fx = buf;
-       int size = xstate_sigframe_size();
-
-       if (ia32_frame && use_fxsr()) {
-               buf_fx = buf + sizeof(struct i387_fsave_struct);
-               size += sizeof(struct i387_fsave_struct);
-       }
-
-       return __restore_xstate_sig(buf, buf_fx, size);
-}
-
-/*
- * Needs to be preemption-safe.
- *
- * NOTE! user_fpu_begin() must be used only immediately before restoring
- * the save state. It does not do any saving/restoring on its own. In
- * lazy FPU mode, it is just an optimization to avoid a #NM exception;
- * the task can lose the FPU right after preempt_enable().
- */
-static inline void user_fpu_begin(void)
-{
-       struct fpu *fpu = &current->thread.fpu;
-
-       preempt_disable();
-       if (!user_has_fpu())
-               __thread_fpu_begin(fpu);
-       preempt_enable();
-}
-
-/*
- * i387 state interaction
- */
-static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
-{
-       if (cpu_has_fxsr) {
-               return tsk->thread.fpu.state->fxsave.cwd;
-       } else {
-               return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
-       }
-}
-
-static inline unsigned short get_fpu_swd(struct task_struct *tsk)
-{
-       if (cpu_has_fxsr) {
-               return tsk->thread.fpu.state->fxsave.swd;
-       } else {
-               return (unsigned short)tsk->thread.fpu.state->fsave.swd;
-       }
-}
-
-static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
-       if (cpu_has_xmm) {
-               return tsk->thread.fpu.state->fxsave.mxcsr;
-       } else {
-               return MXCSR_DEFAULT;
-       }
-}
-
-extern void fpstate_cache_init(void);
-
-extern int fpstate_alloc(struct fpu *fpu);
-extern void fpstate_free(struct fpu *fpu);
-extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
-
-static inline unsigned long
-alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
-               unsigned long *size)
-{
-       unsigned long frame_size = xstate_sigframe_size();
-
-       *buf_fx = sp = round_down(sp - frame_size, 64);
-       if (ia32_frame && use_fxsr()) {
-               frame_size += sizeof(struct i387_fsave_struct);
-               sp -= sizeof(struct i387_fsave_struct);
-       }
-
-       *size = frame_size;
-       return sp;
-}
-
-#endif
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
new file mode 100644
index 0000000..386a883
--- /dev/null
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _ASM_X86_FPU_INTERNAL_H
+#define _ASM_X86_FPU_INTERNAL_H
+
+#include <linux/regset.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+
+#include <asm/user.h>
+#include <asm/fpu/api.h>
+#include <asm/fpu/xsave.h>
+
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+struct ksignal;
+int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+                       compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct ksignal *ksig,
+                    compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct user_i387_struct
+# define user32_fxsr_struct    user_fxsr_struct
+# define ia32_setup_frame      __setup_frame
+# define ia32_setup_rt_frame   __setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
+extern void fpu__cpu_init(void);
+extern void eager_fpu_init(void);
+
+DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+                             struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+                           const struct user_i387_ia32_struct *env);
+
+extern user_regset_active_fn fpregs_active, xfpregs_active;
+extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
+                               xstateregs_get;
+extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
+                                xstateregs_set;
+
+/*
+ * xstateregs_active == fpregs_active. Please refer to the comment
+ * at the definition of fpregs_active.
+ */
+#define xstateregs_active      fpregs_active
+
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
+/*
+ * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, its state will still be saved.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+       per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
+}
+
+static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
+{
+       return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+}
+
+static inline int is_ia32_compat_frame(void)
+{
+       return config_enabled(CONFIG_IA32_EMULATION) &&
+              test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+       return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+       return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
+#define X87_FSW_ES (1 << 7)    /* Exception Summary */
+
+static __always_inline __pure bool use_eager_fpu(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+}
+
+static __always_inline __pure bool use_xsaveopt(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+}
+
+static __always_inline __pure bool use_xsave(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_XSAVE);
+}
+
+static __always_inline __pure bool use_fxsr(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_FXSR);
+}
+
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+       fx->cwd = 0x37f;
+       fx->mxcsr = MXCSR_DEFAULT;
+}
+
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+       if (!use_xsaveopt())
+               return;
+       __sanitize_i387_state(tsk);
+}
+
+#define user_insn(insn, output, input...)                              \
+({                                                                     \
+       int err;                                                        \
+       asm volatile(ASM_STAC "\n"                                      \
+                    "1:" #insn "\n\t"                                  \
+                    "2: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
+       err;                                                            \
+})
+
+#define check_insn(insn, output, input...)                             \
+({                                                                     \
+       int err;                                                        \
+       asm volatile("1:" #insn "\n\t"                                  \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
+       err;                                                            \
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+       return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
+}
+
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+{
+       if (config_enabled(CONFIG_X86_32))
+               return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+       /* See comment in fpu_fxsave() below. */
+       return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+}
+
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+       if (config_enabled(CONFIG_X86_32))
+               return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+       /* See comment in fpu_fxsave() below. */
+       return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+                         "m" (*fx));
+}
+
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+       if (config_enabled(CONFIG_X86_32))
+               return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+       /* See comment in fpu_fxsave() below. */
+       return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+                         "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+       return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+       return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+       if (config_enabled(CONFIG_X86_32))
+               asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+       else {
+               /* Using "rex64; fxsave %0" is broken because, if the memory
+                * operand uses any extended registers for addressing, a second
+                * REX prefix will be generated (to the assembler, rex64
+                * followed by semicolon is a separate instruction), and hence
+                * the 64-bitness is lost.
+                *
+                * Using "fxsaveq %0" would be the ideal choice, but is only
+                * supported starting with gas 2.16.
+                *
+                * Using, as a workaround, the properly prefixed form below
+                * isn't accepted by any binutils version so far released,
+                * complaining that the same type of prefix is used twice if
+                * an extended register is needed for addressing (fix submitted
+                * to mainline 2005-11-21).
+                *
+                *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+                *
+                * This, however, we can work around by forcing the compiler to
+                * select an addressing mode that doesn't require extended
+                * registers.
+                */
+               asm volatile( "rex64/fxsave (%[fx])"
+                            : "=m" (fpu->state->fxsave)
+                            : [fx] "R" (&fpu->state->fxsave));
+       }
+}
+
+/*
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
+ */
+static inline int fpu_save_init(struct fpu *fpu)
+{
+       if (use_xsave()) {
+               xsave_state(&fpu->state->xsave);
+
+               /*
+                * xsave header may indicate the init state of the FP.
+                */
+               if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+                       return 1;
+       } else if (use_fxsr()) {
+               fpu_fxsave(fpu);
+       } else {
+               asm volatile("fnsave %[fx]; fwait"
+                            : [fx] "=m" (fpu->state->fsave));
+               return 0;
+       }
+
+       /*
+        * If exceptions are pending, we need to clear them so
+        * that we don't randomly get exceptions later.
+        *
+        * FIXME! Is this perhaps only true for the old-style
+        * irq13 case? Maybe we could leave the x87 state
+        * intact otherwise?
+        */
+       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
+               asm volatile("fnclex");
+               return 0;
+       }
+       return 1;
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+       if (use_xsave())
+               return fpu_xrstor_checking(&fpu->state->xsave);
+       else if (use_fxsr())
+               return fxrstor_checking(&fpu->state->fxsave);
+       else
+               return frstor_checking(&fpu->state->fsave);
+}
+
+static inline int restore_fpu_checking(struct fpu *fpu)
+{
+       /*
+        * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+        * pending. Clear the x87 state here by setting it to fixed values.
+        * "m" is a random variable that should be in L1.
+        */
+       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+               asm volatile(
+                       "fnclex\n\t"
+                       "emms\n\t"
+                       "fildl %P[addr]"        /* set F?P to defined value */
+                       : : [addr] "m" (fpu->has_fpu));
+       }
+
+       return fpu_restore_checking(fpu);
+}
+
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct fpu *fpu)
+{
+       fpu->has_fpu = 0;
+       this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct fpu *fpu)
+{
+       fpu->has_fpu = 1;
+       this_cpu_write(fpu_fpregs_owner_ctx, fpu);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct fpu *fpu)
+{
+       __thread_clear_has_fpu(fpu);
+       if (!use_eager_fpu())
+               stts();
+}
+
+static inline void __thread_fpu_begin(struct fpu *fpu)
+{
+       if (!use_eager_fpu())
+               clts();
+       __thread_set_has_fpu(fpu);
+}
+
+static inline void drop_fpu(struct fpu *fpu)
+{
+       /*
+        * Forget coprocessor state..
+        */
+       preempt_disable();
+       fpu->counter = 0;
+
+       if (fpu->has_fpu) {
+               /* Ignore delayed exceptions from user space */
+               asm volatile("1: fwait\n"
+                            "2:\n"
+                            _ASM_EXTABLE(1b, 2b));
+               __thread_fpu_end(fpu);
+       }
+
+       fpu->fpstate_active = 0;
+
+       preempt_enable();
+}
+
+static inline void restore_init_xstate(void)
+{
+       if (use_xsave())
+               xrstor_state(init_xstate_buf, -1);
+       else
+               fxrstor_checking(&init_xstate_buf->i387);
+}
+
+/*
+ * Reset the FPU state in the eager case and drop it in the lazy case (later use
+ * will reinit it).
+ */
+static inline void fpu_reset_state(struct fpu *fpu)
+{
+       if (!use_eager_fpu())
+               drop_fpu(fpu);
+       else
+               restore_init_xstate();
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+{
+       fpu_switch_t fpu;
+
+       /*
+        * If the task has used the math, pre-load the FPU on xsave processors
+        * or if the past 5 consecutive context-switches used math.
+        */
+       fpu.preload = new_fpu->fpstate_active &&
+                     (use_eager_fpu() || new_fpu->counter > 5);
+
+       if (old_fpu->has_fpu) {
+               if (!fpu_save_init(old_fpu))
+                       old_fpu->last_cpu = -1;
+               else
+                       old_fpu->last_cpu = cpu;
+
+               /* But leave fpu_fpregs_owner_ctx! */
+               old_fpu->has_fpu = 0;
+
+               /* Don't change CR0.TS if we just switch! */
+               if (fpu.preload) {
+                       new_fpu->counter++;
+                       __thread_set_has_fpu(new_fpu);
+                       prefetch(new_fpu->state);
+               } else if (!use_eager_fpu())
+                       stts();
+       } else {
+               old_fpu->counter = 0;
+               old_fpu->last_cpu = -1;
+               if (fpu.preload) {
+                       new_fpu->counter++;
+                       if (fpu_want_lazy_restore(new_fpu, cpu))
+                               fpu.preload = 0;
+                       else
+                               prefetch(new_fpu->state);
+                       __thread_fpu_begin(new_fpu);
+               }
+       }
+       return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+{
+       if (fpu_switch.preload) {
+               if (unlikely(restore_fpu_checking(new_fpu)))
+                       fpu_reset_state(new_fpu);
+       }
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
+
+static inline int xstate_sigframe_size(void)
+{
+       return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+       void __user *buf_fx = buf;
+       int size = xstate_sigframe_size();
+
+       if (ia32_frame && use_fxsr()) {
+               buf_fx = buf + sizeof(struct i387_fsave_struct);
+               size += sizeof(struct i387_fsave_struct);
+       }
+
+       return __restore_xstate_sig(buf, buf_fx, size);
+}
+
+/*
+ * Needs to be preemption-safe.
+ *
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * the save state. It does not do any saving/restoring on its own. In
+ * lazy FPU mode, it is just an optimization to avoid a #NM exception;
+ * the task can lose the FPU right after preempt_enable().
+ */
+static inline void user_fpu_begin(void)
+{
+       struct fpu *fpu = &current->thread.fpu;
+
+       preempt_disable();
+       if (!user_has_fpu())
+               __thread_fpu_begin(fpu);
+       preempt_enable();
+}
+
+/*
+ * i387 state interaction
+ */
+static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
+{
+       if (cpu_has_fxsr) {
+               return tsk->thread.fpu.state->fxsave.cwd;
+       } else {
+               return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+       }
+}
+
+static inline unsigned short get_fpu_swd(struct task_struct *tsk)
+{
+       if (cpu_has_fxsr) {
+               return tsk->thread.fpu.state->fxsave.swd;
+       } else {
+               return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+       }
+}
+
+static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
+{
+       if (cpu_has_xmm) {
+               return tsk->thread.fpu.state->fxsave.mxcsr;
+       } else {
+               return MXCSR_DEFAULT;
+       }
+}
+
+extern void fpstate_cache_init(void);
+
+extern int fpstate_alloc(struct fpu *fpu);
+extern void fpstate_free(struct fpu *fpu);
+extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
+
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+               unsigned long *size)
+{
+       unsigned long frame_size = xstate_sigframe_size();
+
+       *buf_fx = sp = round_down(sp - frame_size, 64);
+       if (ia32_frame && use_fxsr()) {
+               frame_size += sizeof(struct i387_fsave_struct);
+               sp -= sizeof(struct i387_fsave_struct);
+       }
+
+       *size = frame_size;
+       return sp;
+}
+
+#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 88bb7a75f5c61451615275c58d21009057de5e64..8f6a4ea39657c7e523ab9ac732bbe0e218d0e6d0 100644
@@ -31,7 +31,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 176f69b24358257aab468574cdb122c2205ea615..30016f03f25dbc1f5ede6dbedaac8c8c7e1943e4 100644
@@ -5,7 +5,7 @@
  *  General FPU state handling cleanups
  *     Gareth Hughes <gareth@valinux.com>, May 2000
  */
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 
 /*
  * Track whether the kernel is using the FPU state
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 4eabb426e910c52732e18553b3ed84a476f9f2cf..33df056b1624cf8be39c3a9a59a166138c48160a 100644
@@ -1,7 +1,7 @@
 /*
  * x86 FPU boot time init code
  */
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/tlbflush.h>
 
 /*
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 8aa3b864a2e07b577c76ee6547e6ef7a2bc4c800..4ff726e4e29bb94e96fb095007be95876f08b616 100644
@@ -10,7 +10,7 @@
 #include <linux/compat.h>
 #include <linux/cpu.h>
 #include <asm/fpu/api.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/sigframe.h>
 #include <asm/tlbflush.h>
 #include <asm/xcr.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 51ad3422e728c0dbe7597cd0f5a2a3d24d47c69a..71f4b4d2f1fdaf2ecdc50821858084c5e5670d51 100644
@@ -25,7 +25,7 @@
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/mwait.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 #include <asm/tlbflush.h>
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7adc314b5075212f5fc3c3223d27ad17723e5678..deff651835b4ac5ca843c08f1de806619d004cf0 100644
@@ -39,7 +39,7 @@
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/desc.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4504569c6c4ec6c90db40cae3e49538ffb941bca..c50e013b57d20fe34b58932ce57dc671241e1435 100644
@@ -38,7 +38,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mmu_context.h>
 #include <asm/prctl.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 69451b8965f7b8cd348326731907de6fc381e931..c14a00f54b61bf7718e7090cdd928f5df4824583 100644
@@ -28,7 +28,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index bcb853e44d30289bf92345a4cfcfa1f63aafb9ab..c67f96c8793811b0ccd0acb44c772398c4faaeb3 100644
@@ -26,7 +26,7 @@
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/vdso.h>
 #include <asm/mce.h>
 #include <asm/sighandling.h>
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 60e331ceb844c34aa7db6d2dc6018985b1f7fd99..29f105f0d9fbcb49c58c8f7969addbcd65dc5b3c 100644
@@ -68,7 +68,7 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8abcd6a6f3dc6dfe231c81a153375fb9327b58a8..a65586edbb57073eb2b0ce06f9f79c2f3c1a4c89 100644
@@ -54,7 +54,7 @@
 #include <asm/ftrace.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 479d4ce25081d8be9718aff7a657d43abfbe8dfb..91d7f3b1e50c2941088e563dfa9efc07c63fd47d 100644
@@ -60,7 +60,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <linux/kernel_stat.h>
-#include <asm/fpu-internal.h> /* Ugh! */
+#include <asm/fpu/internal.h> /* Ugh! */
 #include <asm/xcr.h>
 #include <asm/pvclock.h>
 #include <asm/div64.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 412b5f81e54718801170f8b83aa8e1471fa77a43..5563be313fd6da005e3a15c144a4ed080c096e44 100644
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/mpx.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 
 static const char *mpx_mapping_name(struct vm_area_struct *vma)
 {
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 757678fb26e1a06277687c1c90f86e75377de03a..edaf934c749e015526de891f40734cd081acd29d 100644
@@ -21,7 +21,7 @@
 #include <asm/xcr.h>
 #include <asm/suspend.h>
 #include <asm/debugreg.h>
-#include <asm/fpu-internal.h> /* pcntxt_mask */
+#include <asm/fpu/internal.h> /* pcntxt_mask */
 #include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32