#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};
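
/*
 * Layout sketch (illustrative, not part of this header): the anonymous
 * union lets callers treat an MSR value as two 32-bit halves or as one
 * 64-bit quantity:
 *
 *	struct msr m;
 *
 *	m.q = val;	// store all 64 bits...
 *	low  = m.l;	// ...then read back the low half
 *	high = m.h;	// ...and the high half
 */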

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}
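
/*
 * Illustrative use (a sketch, not part of this header): RDTSCP returns
 * the TSC and stores IA32_TSC_AUX in *aux; the kernel programs that
 * MSR (via write_rdtscp_aux() below) so the caller can tell which CPU
 * the reading came from:
 *
 *	unsigned int aux;
 *	u64 tsc = native_read_tscp(&aux);
 */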

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's
 * "A" constraint has different meanings on each. For i386, "A" means
 * exactly edx:eax, while for x86_64 it doesn't mean rdx:rax or
 * edx:eax; instead, it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
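
/*
 * Illustrative expansion (a sketch, not part of this header): with the
 * macros above, native_read_msr() below effectively becomes, on x86_64:
 *
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return (low) | (high) << 32;
 *
 * while on i386 the "A" constraint binds the whole 64-bit value to
 * edx:eax directly, so no shift/or is needed.
 */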

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}
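
/*
 * Typical "safe" usage sketch (illustrative; msr/low/high are
 * placeholders): the _safe variants report -EIO instead of letting a
 * #GP from a non-existent or protected MSR take down the kernel:
 *
 *	if (native_write_msr_safe(msr, low, high))
 *		pr_warn("WRMSR 0x%x faulted\n", msr);
 */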

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
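
/*
 * Timing sketch (illustrative; do_something() is a placeholder): since
 * the CPU may execute RDTSC speculatively, deltas taken with plain
 * rdtsc() are only approximate; use rdtsc_ordered() below when the
 * read must not be reordered with surrounding code:
 *
 *	u64 t1 = rdtsc();
 *	do_something();
 *	u64 t2 = rdtsc();
 *	// t2 - t1 is only meaningful if both reads ran on the same
 *	// CPU or the TSCs are synchronized
 */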

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc() behavior across
 * multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
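
/*
 * Accessor sketch (illustrative; MSR_IA32_TSC is just a convenient
 * example): the pairs below are equivalent ways of moving a 64-bit MSR
 * value as two 32-bit halves or as a single u64:
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_IA32_TSC, lo, hi);
 *	rdmsrl(MSR_IA32_TSC, val);	// val == ((u64)hi << 32) | lo
 *
 *	wrmsr(MSR_IA32_TSC, lo, hi);
 *	wrmsrl(MSR_IA32_TSC, val);
 */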

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})
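
/*
 * Note the asymmetry with rdmsr(): rdmsr_safe() takes pointers to the
 * two halves. Sketch (illustrative; msr is a placeholder):
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		;	// the RDMSR faulted; do not trust lo/hi
 */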

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
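
/*
 * msr_set_bit()/msr_clear_bit() read-modify-write one bit of an MSR.
 * As implemented in arch/x86/lib/msr.c they return a negative errno if
 * the access faulted, 0 if the bit was already in the requested state,
 * and 1 if it had to be changed. Sketch (illustrative msr/bit):
 *
 *	if (msr_set_bit(msr, bit) < 0)
 *		pr_warn("could not set bit %u of MSR 0x%x\n", bit, msr);
 */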

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
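
/*
 * Cross-CPU sketch (illustrative; cpu/msr are placeholders): these
 * helpers run the access on a specific CPU via
 * smp_call_function_single(), so they must not be called with
 * interrupts disabled:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, msr, &val))
 *		;	// the remote RDMSR faulted
 */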
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */