/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
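
/*
 * Size of the XSAVE area needed to hold the features in @xstate_bv, on top
 * of the legacy region and header.  For the standard format, each enabled
 * extended state lives at the offset reported in CPUID.(EAX=0DH,ECX=n):EBX;
 * for the compacted (XSAVES) format, states are packed back to back, so the
 * running total itself serves as the offset.
 */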
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}
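
/*
 * MPX is usable only if the host XCR0 covers both bound-register states
 * (BNDREGS and BNDCSR) and the vendor module (VMX/SVM) reports MPX
 * virtualization support.
 */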
bool kvm_mpx_supported(void)
{
        return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                && kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);
u64 kvm_supported_xcr0(void)
{
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

        if (!kvm_mpx_supported())
                xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        return xcr0;
}
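
/*
 * F(FEATURE) yields the feature's bit position within its cpufeature
 * leaf word, so the masks below line up with the raw CPUID register layout.
 */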
#define F(x) bit(X86_FEATURE_##x)
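
/*
 * Recompute state derived from the vCPU's CPUID table: dynamic bits such
 * as OSXSAVE, the LAPIC timer mode mask, the guest's usable XCR0 and
 * xstate buffer size, and the physical-address width.  Called after
 * userspace updates the table via the set-CPUID ioctls.
 */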
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return 0;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~F(OSXSAVE);
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= F(OSXSAVE);
        }

        if (apic) {
                if (best->ecx & F(TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best) {
                vcpu->arch.guest_supported_xcr0 = 0;
                vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
        } else {
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) &
                        kvm_supported_xcr0();
                vcpu->arch.guest_xstate_size = best->ebx =
                        xstate_required_size(vcpu->arch.xcr0, false);
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        if (use_eager_fpu())
                kvm_x86_ops->fpu_activate(vcpu);

        /*
         * The existing code assumes virtual address is 48-bit in the canonical
         * address checks; exit if it is ever changed.
         */
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best && ((best->eax & 0xff00) >> 8) != 48 &&
            ((best->eax & 0xff00) >> 8) != 0)
                return -EINVAL;

        /* Update physical-address width */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

        kvm_pmu_refresh(vcpu);
        return 0;
}
static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
                entry->edx &= ~F(NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
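
/*
 * KVM_SET_CPUID2: copy the table in the modern kvm_cpuid_entry2 format
 * directly from userspace, then rederive the dependent vCPU state.
 */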
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
out:
        return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}
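
/*
 * Drop feature bits the host itself lacks: a guest must never see a
 * capability word advertising more than boot_cpu_data supports.
 */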
static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}
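
/* Fill one entry with the host's raw CPUID output for (function, index). */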
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
                                   u32 func, u32 index, int *nent, int maxnent)
{
        switch (func) {
        case 0:
                entry->eax = 1;         /* only one leaf currently */
                ++*nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++*nent;
                break;
        default:
                break;
        }

        entry->function = func;
        entry->index = index;

        return 0;
}
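
/*
 * Build the KVM_GET_SUPPORTED_CPUID view of one leaf: query the host,
 * then mask and override the registers so that only features KVM can
 * virtualize are reported.  Multi-index leaves fan out into consecutive
 * entries.
 */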
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                                 u32 index, int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_supported_word1_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
                /* NOTE: MONITOR (and MWAIT) are emulated as NOP,
                 * but *not* advertised to guests via CPUID ! */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
        /* cpuid 0xC0000001.edx */
        const u32 kvm_supported_word5_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);
        /* cpuid 7.0.ebx */
        const u32 kvm_supported_word9_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
                F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);
        /* cpuid 0xD.1.eax */
        const u32 kvm_supported_word10_x86_features =
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (*nent >= maxnent)
                goto out;

        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xd);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
                cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 has additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 7: {
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* Mask ebx against host capability word 9 */
                if (index == 0) {
                        entry->ebx &= kvm_supported_word9_x86_features;
                        cpuid_mask(&entry->ebx, 9);
                        // TSC_ADJUST is emulated
                        entry->ebx |= F(TSC_ADJUST);
                } else
                        entry->ebx = 0;
                entry->ecx = 0;
                entry->eax = 0;
                entry->edx = 0;
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /* function 0xb has additional index. */
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xd: {
                int idx, i;
                u64 supported = kvm_supported_xcr0();

                entry->eax &= supported;
                entry->ebx = xstate_required_size(supported, false);
                entry->ecx = entry->ebx;
                entry->edx &= supported >> 32;
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                if (!supported)
                        break;

                for (idx = 1, i = 1; idx < 64; ++idx) {
                        u64 mask = ((u64)1 << idx);
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_supported_word10_x86_features;
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
                                                xstate_required_size(supported,
                                                                     true);
                        } else {
                                if (entry[i].eax == 0 || !(supported & mask))
                                        continue;
                                if (WARN_ON_ONCE(entry[i].ecx & 1))
                                        continue;
                        }
                        entry[i].ecx = 0;
                        entry[i].edx = 0;
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                        ++i;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                static const char signature[12] = "KVMKVMKVM\0\0";
                const u32 *sigptr = (const u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
                cpuid_mask(&entry->ecx, 6);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                if (!g_phys_as)
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->ebx = entry->edx = 0;
                break;
        }
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                break;
        /*Add support for Centaur's CPUID instruction*/
        case 0xC0000000:
                /*Just support up to 0xC0000004 now*/
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_supported_word5_x86_features;
                cpuid_mask(&entry->edx, 5);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }
        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
                        u32 idx, int *nent, int maxnent, unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

        return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F
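
/*
 * Describes one root of the CPUID enumeration below: a starting function,
 * whether EAX at that function counts further leaves, and an optional
 * qualifier (e.g. only enumerate Centaur leaves on Centaur hosts).
 */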
struct kvm_cpuid_param {
        u32 func;
        u32 idx;
        bool has_leaf_count;
        bool (*qualifier)(const struct kvm_cpuid_param *param);
};
static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
         * have to give ourselves satisfied only with the emulated side. /me
         * sheds a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }

        return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static const struct kvm_cpuid_param param[] = {
                { .func = 0, .has_leaf_count = true },
                { .func = 0x80000000, .has_leaf_count = true },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
                { .func = KVM_CPUID_SIGNATURE },
                { .func = KVM_CPUID_FEATURES },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        r = -ENOMEM;
        cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                const struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
                                &nent, cpuid->nent, type);

                if (r)
                        goto out_free;

                if (!ent->has_leaf_count)
                        continue;

                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
                                         &nent, cpuid->nent, type);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
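
/*
 * Rotate the STATE_READ_NEXT marker among entries sharing a stateful
 * function (CPUID leaf 2 style), so successive guest reads walk the set
 * round-robin, mimicking hardware where repeated CPUID executions return
 * successive values.
 */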
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
        return 0; /* silence gcc, even though control never reaches here */
}
/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
        u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);

        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        /*
         * Perfmon not yet supported for L2 guest.
         */
        if (is_guest_mode(vcpu) && function == 0xa)
                best = NULL;

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
        trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
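
/*
 * Full CPUID instruction emulation: EAX/ECX select the leaf, all four
 * GPRs receive the result, and the guest's RIP is advanced past the
 * CPUID instruction via the vendor module's skip hook.
 */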
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);