kvm: vmx: ensure VMCS is current while enabling PML
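PML_ADDRESS and GUEST_PML_INDEX are VMCS fields, so the vmcs_write64()/vmcs_write16() calls that program them only affect whichever VMCS is currently loaded on the executing CPU. Judging by its position in the hunks below, the old vmx_create_pml_buffer() issued those writes from vmx_create_vcpu() outside the section where vmcs01 is guaranteed to be current; the fix moves the writes into vmx_vcpu_setup(), which does run with the VMCS loaded, and keeps only the plain page allocation in vmx_create_vcpu(). A simplified sketch of the relevant ordering in vmx_create_vcpu() (not the literal upstream code, but the helpers named are the ones vmx.c uses):

	cpu = get_cpu();			/* disable preemption */
	vmx_vcpu_load(&vmx->vcpu, cpu);		/* vmcs01 becomes the current VMCS */
	vmx->vcpu.cpu = cpu;
	err = vmx_vcpu_setup(vmx);		/* PML_ADDRESS/GUEST_PML_INDEX written here */
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();				/* after this point vmcs01 may no longer be
						 * current, so the old, later call site could
						 * hit a different VMCS or fail outright */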
[deliverable/linux.git] arch/x86/kvm/vmx.c
index 64a79f271276a025ea76759189cddfbe967ae1ed..e34965b37a883cb2f89c7ec7cb348bb9f152436c 100644
@@ -4979,6 +4979,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        if (vmx_xsaves_supported())
                vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+       if (enable_pml) {
+               ASSERT(vmx->pml_pg);
+               vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+               vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+       }
+
        return 0;
 }
 
@@ -7937,22 +7943,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
        *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
-       struct page *pml_pg;
-
-       pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!pml_pg)
-               return -ENOMEM;
-
-       vmx->pml_pg = pml_pg;
-
-       vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-       vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
-       return 0;
-}
-
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
        if (vmx->pml_pg) {
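The destroy helper itself is untouched; only the creation side is folded into vcpu setup. The new free_pml error label added further down relies on vmx_destroy_pml_buffer() being safe to call when no PML page was ever allocated (enable_pml off, or a later allocation failing first). The hunk cuts the body off after the NULL check; presumably it continues along these lines (a reconstruction for readability, not quoted from the tree):

	static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
	{
		if (vmx->pml_pg) {
			__free_page(vmx->pml_pg);	/* release the single PML page */
			vmx->pml_pg = NULL;		/* repeated calls stay harmless */
		}
	}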
@@ -8885,14 +8875,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;
 
+       err = -ENOMEM;
+
+       /*
+        * If PML is turned on, a failure to enable PML just results in a
+        * failure to create the vcpu, so we can simplify the PML logic (by
+        * avoiding cases such as enabling PML partially on vcpus for the
+        * guest, etc.).
+        */
+       if (enable_pml) {
+               vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vmx->pml_pg)
+                       goto uninit_vcpu;
+       }
+
        vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
                     > PAGE_SIZE);
 
-       err = -ENOMEM;
-       if (!vmx->guest_msrs) {
-               goto uninit_vcpu;
-       }
+       if (!vmx->guest_msrs)
+               goto free_pml;
 
        vmx->loaded_vmcs = &vmx->vmcs01;
        vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8936,18 +8938,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        vmx->nested.current_vmptr = -1ull;
        vmx->nested.current_vmcs12 = NULL;
 
-       /*
-        * If PML is turned on, failure on enabling PML just results in failure
-        * of creating the vcpu, therefore we can simplify PML logic (by
-        * avoiding dealing with cases, such as enabling PML partially on vcpus
-        * for the guest, etc.
-        */
-       if (enable_pml) {
-               err = vmx_create_pml_buffer(vmx);
-               if (err)
-                       goto free_vmcs;
-       }
-
        return &vmx->vcpu;
 
 free_vmcs:
@@ -8955,6 +8945,8 @@ free_vmcs:
        free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
+free_pml:
+       vmx_destroy_pml_buffer(vmx);
 uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
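Taken together with the allocation hunk, the unwind in vmx_create_vcpu() now falls through in reverse order of setup: a failed PML page allocation jumps straight to uninit_vcpu (nothing of the buffer exists yet), a failed guest_msrs allocation jumps to the new free_pml label, and failures later in setup enter higher up the chain. A condensed view of the resulting label chain, limited to what the hunks above show (context between and after the hunks is elided):

	free_vmcs:
		/* ... */
		free_loaded_vmcs(vmx->loaded_vmcs);
	free_msrs:
		kfree(vmx->guest_msrs);
	free_pml:
		vmx_destroy_pml_buffer(vmx);	/* no-op when pml_pg was never allocated */
	uninit_vcpu:
		kvm_vcpu_uninit(&vmx->vcpu);
	free_vcpu:
		/* ... */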