x86/fpu/xstate: Use explicit parameter in xstate_fault()
arch/x86/include/asm/fpu/xstate.h
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <linux/uaccess.h>

/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))

#define XSTATE_CPUID		0x0000000d

#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		  \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

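/*
 * Illustrative decode (not part of the original header, added for clarity):
 * on CONFIG_X86_64 the REX_PREFIX above is "0x48, ", so e.g. XSAVE expands
 * to ".byte 0x48,0x0f,0xae,0x27": the REX.W form of XSAVE with ModRM byte
 * 0x27 (opcode extension /4, memory operand (%rdi)), i.e. the same bytes a
 * recent assembler would emit for:
 *
 *	xsave64 (%rdi)
 *
 * Hand-coded .byte sequences are used here, presumably so the header still
 * assembles with toolchains that do not know the newer XSAVES/XRSTORS
 * mnemonics.
 */
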
/* xstate instruction fault handler: */
#define xstate_fault(__err)		\
					\
	".section .fixup,\"ax\"\n"	\
					\
	"3:  movl $-1,%[err]\n"		\
	"    jmp  2b\n"			\
					\
	".previous\n"			\
					\
	_ASM_EXTABLE(1b, 3b)		\
	: [err] "=r" (__err)

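/*
 * Illustrative expansion (not part of the original header, added for
 * clarity): callers place label "1:" on the XSAVE/XRSTOR-family instruction
 * and label "2:" right after it, then append xstate_fault(err).  A caller
 * like the ones below:
 *
 *	asm volatile("1:"XRSTOR"\n\t"
 *		"2:\n\t"
 *		     xstate_fault(err)
 *		: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 *		: "memory");
 *
 * therefore assembles roughly to:
 *
 *	1:	xrstor (%rdi)		# may fault on a bad xsave image
 *	2:	...			# normal continuation
 *		.section .fixup,"ax"
 *	3:	movl $-1, %[err]	# fault path: report failure
 *		jmp  2b
 *		.previous
 *		(exception table entry: a fault at 1b is fixed up at 3b)
 *
 * so 'err' stays 0 on success and becomes -1 if the instruction faults.
 */
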
/*
 * This function is called only during boot time when the x86 caps are not
 * yet set up and alternatives can not be used yet.
 */
static inline int copy_xregs_to_kernel_booting(struct xregs_state *fx)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			     xstate_fault(err)
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			     xstate_fault(err)
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}

/*
 * This function is called only during boot time when the x86 caps are not
 * yet set up and alternatives can not be used yet.
 */
static inline int copy_kernel_to_xregs_booting(struct xregs_state *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			     xstate_fault(err)
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			     xstate_fault(err)
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}

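/*
 * Illustrative sketch (not part of the original header, added for clarity):
 * how early boot code might use the two _booting() helpers above to capture
 * and re-load an all-features init image.  'init_xstate_buf' and
 * 'example_xstate_boot_setup' are hypothetical names, not defined here:
 *
 *	static struct xregs_state init_xstate_buf __aligned(64);
 *
 *	void __init example_xstate_boot_setup(void)
 *	{
 *		if (copy_xregs_to_kernel_booting(&init_xstate_buf))
 *			pr_warn("xstate: boot-time XSAVE failed\n");
 *
 *		if (copy_kernel_to_xregs_booting(&init_xstate_buf, -1))
 *			pr_warn("xstate: boot-time XRSTOR failed\n");
 *	}
 *
 * Both helpers WARN if called after boot, since they bypass the alternatives
 * machinery and pick the instruction with boot_cpu_has() instead.
 */
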
/*
 * Save processor xstate to xsave area.
 */
static inline int copy_xregs_to_kernel(struct xregs_state *fx)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(!alternatives_patched);

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because
	 * it supports the compacted format and supervisor states in addition
	 * to the modified optimization of xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports the modified optimization, which is not
	 * supported by xsave.
	 *
	 * If neither xsaves nor xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		     xstate_fault(err)
		     : "0" (0)
		     : "memory");

	return err;
}

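/*
 * Illustrative summary (not part of the original header, added for clarity)
 * of which instruction the alternatives patching above leaves behind once
 * boot-time patching has run:
 *
 *	CPU feature set			instruction actually executed
 *	-----------------------------	-----------------------------
 *	X86_FEATURE_XSAVES		xsaves   (compacted format, supervisor
 *						  states, modified optimization)
 *	X86_FEATURE_XSAVEOPT only	xsaveopt (modified optimization)
 *	neither				xsave
 */
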
/*
 * Restore processor xstate from xsave area.
 */
static inline int copy_kernel_to_xregs(struct xregs_state *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	/*
	 * Use xrstors to restore context if it is enabled. xrstors supports
	 * the compacted format of the xsave area, which is not supported by
	 * xrstor.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		     xstate_fault(err)
		     : "0" (0)
		     : "memory");

	return err;
}

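/*
 * Illustrative sketch (not part of the original header, added for clarity):
 * the 64-bit 'mask' is split into edx:eax (hmask:lmask), so callers can
 * restore only a subset of the saved components.  'fpu_buf' is a
 * hypothetical, 64-byte aligned struct xregs_state:
 *
 *	int err = copy_kernel_to_xregs(&fpu_buf, XSTATE_FP | XSTATE_SSE);
 *	if (err)
 *		pr_err("FP/SSE restore from kernel buffer faulted\n");
 *
 * Components not selected by the mask are left untouched by the restore.
 */
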
/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XSAVE"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault(err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
}

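/*
 * Illustrative sketch (not part of the original header, added for clarity):
 * saving the current task's extended state into a user-supplied buffer, for
 * example while building a signal frame.  'sig_xstate' is a hypothetical
 * __user pointer that must be 64-byte aligned and large enough for the full
 * xstate_size:
 *
 *	struct xregs_state __user *sig_xstate = ...;
 *
 *	if (copy_xregs_to_user(sig_xstate))
 *		return -EFAULT;		(the user page faulted or is gone)
 *
 * The STAC/CLAC pair around the XSAVE above temporarily permits kernel
 * access to user pages on SMAP-capable CPUs.
 */
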
/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	int err = 0;
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XRSTOR"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault(err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}

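/*
 * Illustrative counterpart (not part of the original header, added for
 * clarity): restoring state from a user buffer, for example on sigreturn,
 * with the feature mask limited to what the kernel supports.  'sig_xstate'
 * is again a hypothetical __user pointer:
 *
 *	if (copy_user_to_xregs(sig_xstate, xfeatures_mask))
 *		return -EFAULT;		(caller typically raises SIGSEGV)
 *
 * A fault here can mean either an unmapped user page or an xsave image the
 * CPU refuses to load (e.g. reserved bits set in the header).
 */
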
void *get_xsave_addr(struct xregs_state *xsave, int xstate);

#endif