Merge tag 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux...
[deliverable/linux.git] / arch / x86 / include / asm / xsave.h
CommitLineData
dc1e35c6
SS
1#ifndef __ASM_X86_XSAVE_H
2#define __ASM_X86_XSAVE_H
3
6152e4b1 4#include <linux/types.h>
dc1e35c6 5#include <asm/processor.h>
dc1e35c6 6
ee813d53 7#define XSTATE_CPUID 0x0000000d
dc1e35c6 8
c2bc11f1
FY
9#define XSTATE_FP 0x1
10#define XSTATE_SSE 0x2
11#define XSTATE_YMM 0x4
12#define XSTATE_BNDREGS 0x8
13#define XSTATE_BNDCSR 0x10
14#define XSTATE_OPMASK 0x20
15#define XSTATE_ZMM_Hi256 0x40
16#define XSTATE_Hi16_ZMM 0x80
/* Legacy FP+SSE region covered by FXSAVE */
#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
/* All AVX-512 state components */
#define XSTATE_AVX512	(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))

/* Size of the legacy FXSAVE region at the start of the xsave area */
#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
dc1e35c6 40
b359e8a4
SS
41#ifdef CONFIG_X86_64
42#define REX_PREFIX "0x48, "
43#else
44#define REX_PREFIX
45#endif
46
6152e4b1
PA
47extern unsigned int xstate_size;
48extern u64 pcntxt_mask;
5b3efd50 49extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
304bceda 50extern struct xsave_struct *init_xstate_buf;
dc1e35c6 51
dc1e35c6 52extern void xsave_init(void);
5b3efd50 53extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
b359e8a4
SS
54extern int init_fpu(struct task_struct *child);
55
c9e5a5a7 56/* These macros all use (%edi)/(%rdi) as the single memory argument. */
200b08a9
FY
57#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
58#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
59#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
60#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
61#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
62
/*
 * Shared exception-table fixup tail for the XSAVE/XRSTOR asm statements.
 * On a fault at label 1: control jumps to 3:, which stores -1 into the
 * local 'err' variable and resumes at label 2:. Expanded inside an asm
 * statement that defines labels 1: and 2: and a local int 'err'.
 */
#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3: movl $-1,%[err]\n"		\
			"   jmp 2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)
adb9d526
FY
70/*
71 * This function is called only during boot time when x86 caps are not set
72 * up and alternative can not be used yet.
73 */
d0f2dd18 74static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
adb9d526
FY
75{
76 u32 lmask = mask;
77 u32 hmask = mask >> 32;
78 int err = 0;
79
80 WARN_ON(system_state != SYSTEM_BOOTING);
81
82 if (boot_cpu_has(X86_FEATURE_XSAVES))
83 asm volatile("1:"XSAVES"\n\t"
84 "2:\n\t"
06c8173e
QC
85 xstate_fault
86 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
adb9d526
FY
87 : "memory");
88 else
89 asm volatile("1:"XSAVE"\n\t"
90 "2:\n\t"
06c8173e
QC
91 xstate_fault
92 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
adb9d526 93 : "memory");
adb9d526
FY
94 return err;
95}
96
97/*
98 * This function is called only during boot time when x86 caps are not set
99 * up and alternative can not be used yet.
100 */
101static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
102{
103 u32 lmask = mask;
104 u32 hmask = mask >> 32;
105 int err = 0;
106
107 WARN_ON(system_state != SYSTEM_BOOTING);
108
109 if (boot_cpu_has(X86_FEATURE_XSAVES))
110 asm volatile("1:"XRSTORS"\n\t"
111 "2:\n\t"
06c8173e
QC
112 xstate_fault
113 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
adb9d526
FY
114 : "memory");
115 else
116 asm volatile("1:"XRSTOR"\n\t"
117 "2:\n\t"
06c8173e
QC
118 xstate_fault
119 : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
adb9d526 120 : "memory");
adb9d526
FY
121 return err;
122}
123
f31a9f7c
FY
124/*
125 * Save processor xstate to xsave area.
126 */
127static inline int xsave_state(struct xsave_struct *fx, u64 mask)
128{
129 u32 lmask = mask;
130 u32 hmask = mask >> 32;
131 int err = 0;
132
133 /*
134 * If xsaves is enabled, xsaves replaces xsaveopt because
135 * it supports compact format and supervisor states in addition to
136 * modified optimization in xsaveopt.
137 *
138 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
139 * because xsaveopt supports modified optimization which is not
140 * supported by xsave.
141 *
142 * If none of xsaves and xsaveopt is enabled, use xsave.
143 */
144 alternative_input_2(
145 "1:"XSAVE,
06c8173e 146 XSAVEOPT,
f31a9f7c 147 X86_FEATURE_XSAVEOPT,
06c8173e 148 XSAVES,
f31a9f7c
FY
149 X86_FEATURE_XSAVES,
150 [fx] "D" (fx), "a" (lmask), "d" (hmask) :
151 "memory");
152 asm volatile("2:\n\t"
153 xstate_fault
154 : "0" (0)
155 : "memory");
156
157 return err;
158}
159
160/*
161 * Restore processor xstate from xsave area.
162 */
163static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
164{
165 int err = 0;
166 u32 lmask = mask;
167 u32 hmask = mask >> 32;
168
169 /*
170 * Use xrstors to restore context if it is enabled. xrstors supports
171 * compacted format of xsave area which is not supported by xrstor.
172 */
173 alternative_input(
174 "1: " XRSTOR,
06c8173e 175 XRSTORS,
f31a9f7c
FY
176 X86_FEATURE_XSAVES,
177 "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
178 : "memory");
179
180 asm volatile("2:\n"
181 xstate_fault
182 : "0" (0)
183 : "memory");
184
185 return err;
186}
187
f9de314b
FY
188/*
189 * Save xstate context for old process during context switch.
190 */
191static inline void fpu_xsave(struct fpu *fpu)
b359e8a4 192{
f9de314b
FY
193 xsave_state(&fpu->state->xsave, -1);
194}
/*
 * Restore xstate context for new process during context switch.
 *
 * Restores every enabled state component (mask of all ones).
 * Returns 0 on success, -1 if the restore faulted.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}
203
facbf4d9
FY
204/*
205 * Save xstate to user space xsave area.
206 *
207 * We don't use modified optimization because xrstor/xrstors might track
208 * a different application.
209 *
210 * We don't use compacted format xsave area for
211 * backward compatibility for old applications which don't understand
212 * compacted format of xsave area.
213 */
c37b5efe 214static inline int xsave_user(struct xsave_struct __user *buf)
9dc89c0f
SS
215{
216 int err;
8e221b6d
SS
217
218 /*
219 * Clear the xsave header first, so that reserved fields are
220 * initialized to zero.
221 */
72a671ce 222 err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
8e221b6d
SS
223 if (unlikely(err))
224 return -EFAULT;
225
63bcff2a 226 __asm__ __volatile__(ASM_STAC "\n"
facbf4d9 227 "1:"XSAVE"\n"
63bcff2a 228 "2: " ASM_CLAC "\n"
facbf4d9 229 xstate_fault
9dc89c0f
SS
230 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
231 : "memory");
9dc89c0f
SS
232 return err;
233}
234
facbf4d9
FY
235/*
236 * Restore xstate from user space xsave area.
237 */
6152e4b1 238static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
9dc89c0f 239{
facbf4d9 240 int err = 0;
9dc89c0f 241 struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
6152e4b1
PA
242 u32 lmask = mask;
243 u32 hmask = mask >> 32;
9dc89c0f 244
63bcff2a 245 __asm__ __volatile__(ASM_STAC "\n"
facbf4d9 246 "1:"XRSTOR"\n"
63bcff2a 247 "2: " ASM_CLAC "\n"
facbf4d9 248 xstate_fault
9dc89c0f
SS
249 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
250 : "memory"); /* memory required? */
251 return err;
252}
253
7496d645
FY
254void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
255void setup_xstate_comp(void);
256
dc1e35c6 257#endif
This page took 0.485522 seconds and 5 git commands to generate.