/* arch/x86/kvm/cpuid.h */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

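/*
 * Helpers for querying the CPUID state that userspace configured for a
 * guest via KVM_SET_CPUID/KVM_SET_CPUID2.  The guest_cpuid_has_*()
 * predicates below report whether a feature bit was exposed to the
 * guest, which need not match what the host CPU actually supports.
 */
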
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
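
/*
 * Example (a sketch of how callers elsewhere in KVM use these
 * predicates, e.g. the CR4 emulation in x86.c): the guest must not be
 * allowed to set CR4.OSXSAVE unless XSAVE was exposed in its CPUID:
 *
 *	if ((cr4 & X86_CR4_OSXSAVE) && !guest_cpuid_has_xsave(vcpu))
 *		return 1;
 */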

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ecx & bit(X86_FEATURE_PKU));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}
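
/*
 * The vendor check above compares only EBX of leaf 0, i.e. the first
 * dword of the vendor string ("Auth" for "AuthenticAMD").  Checking a
 * single register suffices because no other common vendor string
 * shares that dword.
 */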

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS
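
/*
 * Background for the comment above (assuming the bit() helper from
 * x86.h, which reduces to 1 << (bitno & 31)): kernel feature numbers
 * encode a word offset plus a bit position, so for "scattered"
 * features like NRIPS the kernel's bit number does not match the
 * hardware bit in the CPUID register, and the raw hardware bit must
 * be used instead.
 */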

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

#endif /* ARCH_X86_KVM_CPUID_H */