/* arch/x86/kvm/cpuid.h */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
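
/*
 * Guest MAXPHYADDR (physical address width), cached when the vCPU's CPUID
 * entries are updated so that hot paths do not have to walk the entries.
 */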
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
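
/*
 * The guest_cpuid_has_*() helpers below test the CPUID entries that userspace
 * provided for this vCPU (KVM_SET_CPUID{,2}), i.e. the features advertised to
 * the guest, not the capabilities of the host CPU.
 */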

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/* XSAVE is never reported to the guest if the host lacks it. */
	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ecx & bit(X86_FEATURE_PKU));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}
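
/*
 * Vendor check: CPUID leaf 0 EBX holds the first four bytes of the vendor
 * string ("Auth" for "AuthenticAMD").
 */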

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through CPUID function 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS
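
/*
 * Decode the vCPU's family/model/stepping from CPUID leaf 0x1 using the
 * common x86_family/x86_model/x86_stepping() helpers; each returns -1 if
 * the leaf is not present.
 */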

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

#endif