KVM: x86: unify handling of interrupt window
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4a4eec30cc08c6924e370a6ab4ac56d9c03c6e69..6ee0dc69675b6e55412a5bbc053e89e00be93ec1 100644
@@ -809,7 +809,7 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
-static int vmx_vm_has_apicv(struct kvm *kvm);
+static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
@@ -946,9 +946,9 @@ static inline bool cpu_has_vmx_tpr_shadow(void)
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline bool vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
 {
-       return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
+       return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
 }
 
 static inline bool cpu_has_secondary_exec_ctrls(void)
@@ -1062,9 +1062,9 @@ static inline bool cpu_has_vmx_ple(void)
                SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }
 
-static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 {
-       return flexpriority_enabled && irqchip_in_kernel(kvm);
+       return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
 static inline bool cpu_has_vmx_vpid(void)
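Note that both helpers above now take a vCPU and test lapic_in_kernel() rather than the per-VM irqchip_in_kernel(). For reference, a minimal sketch of the helper they switch to, assumed from the v4.3-era arch/x86/kvm/irq.h (verify against the tree):

        static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
        {
                /* Same result as irqchip_in_kernel(vcpu->kvm), but with
                 * fewer pointer dereferences on a hot path. */
                return vcpu->arch.apic != NULL;
        }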
@@ -1264,7 +1264,7 @@ static void vmcs_load(struct vmcs *vmcs)
                       vmcs, phys_addr);
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 /*
  * This bitmap is used to indicate whether the vmclear
  * operation is enabled on all cpus. All disabled by
@@ -1302,7 +1302,7 @@ static void crash_vmclear_local_loaded_vmcss(void)
 #else
 static inline void crash_enable_local_vmclear(int cpu) { }
 static inline void crash_disable_local_vmclear(int cpu) { }
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 static void __loaded_vmcs_clear(void *arg)
 {
@@ -2377,7 +2377,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
        vmx->nested.nested_vmx_pinbased_ctls_high |=
                PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
                PIN_BASED_VMX_PREEMPTION_TIMER;
-       if (vmx_vm_has_apicv(vmx->vcpu.kvm))
+       if (vmx_cpu_uses_apicv(&vmx->vcpu))
                vmx->nested.nested_vmx_pinbased_ctls_high |=
                        PIN_BASED_POSTED_INTR;
 
@@ -3150,7 +3150,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
        struct page *pages;
        struct vmcs *vmcs;
 
-       pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
+       pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
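The allocation above is a straight rename on the mm side: __alloc_pages_node(nid, gfp_mask, order) replaces alloc_pages_exact_node(), whose name wrongly suggested an exact-size allocation. Equivalent usage, sketched under that assumption:

        /* order-N block of pages on the requested NUMA node, NULL on failure */
        struct page *pages = __alloc_pages_node(node, GFP_KERNEL,
                                                vmcs_config.order);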
@@ -4332,9 +4332,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
                        msr, MSR_TYPE_W);
 }
 
-static int vmx_vm_has_apicv(struct kvm *kvm)
+static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
 {
-       return enable_apicv && irqchip_in_kernel(kvm);
+       return enable_apicv && lapic_in_kernel(vcpu);
 }
 
 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4514,7 +4514,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 {
        u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
 
-       if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+       if (!vmx_cpu_uses_apicv(&vmx->vcpu))
                pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
        return pin_based_exec_ctrl;
 }
@@ -4526,7 +4526,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
        if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
                exec_control &= ~CPU_BASED_MOV_DR_EXITING;
 
-       if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+       if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
                exec_control &= ~CPU_BASED_TPR_SHADOW;
 #ifdef CONFIG_X86_64
                exec_control |= CPU_BASED_CR8_STORE_EXITING |
@@ -4543,7 +4543,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
        u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
-       if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+       if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
                exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
        if (vmx->vpid == 0)
                exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
@@ -4557,7 +4557,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
                exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
        if (!ple_gap)
                exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-       if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+       if (!vmx_cpu_uses_apicv(&vmx->vcpu))
                exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
        exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4618,7 +4618,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                vmx_secondary_exec_control(vmx));
        }
 
-       if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
+       if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
                vmcs_write64(EOI_EXIT_BITMAP0, 0);
                vmcs_write64(EOI_EXIT_BITMAP1, 0);
                vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4762,7 +4762,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        if (cpu_has_vmx_tpr_shadow() && !init_event) {
                vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
-               if (vm_need_tpr_shadow(vcpu->kvm))
+               if (cpu_need_tpr_shadow(vcpu))
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
                                     __pa(vcpu->arch.apic->regs));
                vmcs_write32(TPR_THRESHOLD, 0);
@@ -4770,7 +4770,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
        kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-       if (vmx_vm_has_apicv(vcpu->kvm))
+       if (vmx_cpu_uses_apicv(vcpu))
                memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
 
        if (vmx->vpid != 0)
@@ -5310,7 +5310,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                                u8 cr8 = (u8)val;
                                err = kvm_set_cr8(vcpu, cr8);
                                kvm_complete_insn_gp(vcpu, err);
-                               if (irqchip_in_kernel(vcpu->kvm))
+                               if (lapic_in_kernel(vcpu))
                                        return 1;
                                if (cr8_prev <= cr8)
                                        return 1;
@@ -5524,17 +5524,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        ++vcpu->stat.irq_window_exits;
-
-       /*
-        * If the user space waits to inject interrupts, exit as soon as
-        * possible
-        */
-       if (!irqchip_in_kernel(vcpu->kvm) &&
-           vcpu->run->request_interrupt_window &&
-           !kvm_cpu_has_interrupt(vcpu)) {
-               vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
-               return 0;
-       }
        return 1;
 }
 
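This deletion is the heart of the patch: handle_interrupt_window() now simply returns 1 to re-enter the generic run loop, and the "exit to userspace so it can inject an interrupt" decision is made once in common x86 code for both VMX and SVM. A sketch of the unified check as it presumably lands in arch/x86/kvm/x86.c (names assumed from the same series, not quoted from this hunk):

        static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
        {
                /* Userspace requested an interrupt-window exit and owns
                 * interrupt injection (no in-kernel PIC). */
                return vcpu->run->request_interrupt_window &&
                        likely(!pic_in_kernel(vcpu->kvm));
        }

        /* ...checked after every exit in the run loop: */
        if (dm_request_for_irq_injection(vcpu) &&
            kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                ++vcpu->stat.request_irq_exits;
                r = 0;
        }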
@@ -6064,6 +6053,8 @@ static __init int hardware_setup(void)
        memcpy(vmx_msr_bitmap_longmode_x2apic,
                        vmx_msr_bitmap_longmode, PAGE_SIZE);
 
+       set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
        if (enable_apicv) {
                for (msr = 0x800; msr <= 0x8ff; msr++)
                        vmx_disable_intercept_msr_read_x2apic(msr);
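The new set_bit() above reserves VPID 0 at hardware-setup time: the CPU associates VPID 0 with the host (non-VMX) linear-address space, so no guest may be tagged with it. With bit 0 pre-set, the allocator can only yield 0 as an explicit "no VPID available" fallback; rough shape, sketched (the v4.3 function actually operates on struct vcpu_vmx):

        spin_lock(&vmx_vpid_lock);
        vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
        if (vpid < VMX_NR_VPIDS)
                __set_bit(vpid, vmx_vpid_bitmap);       /* bit 0 already taken */
        else
                vpid = 0;                               /* run without a VPID */
        spin_unlock(&vmx_vpid_lock);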
@@ -7936,10 +7927,10 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
         * apicv
         */
        if (!cpu_has_vmx_virtualize_x2apic_mode() ||
-                               !vmx_vm_has_apicv(vcpu->kvm))
+                               !vmx_cpu_uses_apicv(vcpu))
                return;
 
-       if (!vm_need_tpr_shadow(vcpu->kvm))
+       if (!cpu_need_tpr_shadow(vcpu))
                return;
 
        sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -8041,9 +8032,10 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
        }
 }
 
-static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
-       if (!vmx_vm_has_apicv(vcpu->kvm))
+       u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
+       if (!vmx_cpu_uses_apicv(vcpu))
                return;
 
        vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
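The bitmap parameter disappears because the EOI-exit bitmap now lives in vcpu->arch.eoi_exit_bitmap and is filled by common code before the hook is invoked. Assumed caller-side shape in arch/x86/kvm/x86.c (a sketch, not part of this hunk):

        static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        {
                ...
                kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap);
                kvm_x86_ops->load_eoi_exitmap(vcpu);
        }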
@@ -8542,7 +8534,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        put_cpu();
        if (err)
                goto free_vmcs;
-       if (vm_need_virtualize_apic_accesses(kvm)) {
+       if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
                err = alloc_apic_access_page(kvm);
                if (err)
                        goto free_vmcs;
@@ -8615,17 +8607,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        u64 ipat = 0;
 
        /* For VT-d and EPT combination
-        * 1. MMIO: guest may want to apply WC, trust it.
+        * 1. MMIO: always map as UC
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
-        *      result, try to trust guest.  So the same as item 1.
+        *      result, try to trust guest.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
-       if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+       if (is_mmio) {
+               cache = MTRR_TYPE_UNCACHABLE;
+               goto exit;
+       }
+
+       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
                ipat = VMX_EPT_IPAT_BIT;
                cache = MTRR_TYPE_WRBACK;
                goto exit;
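For context, the exit label these branches jump to packs the result into EPT leaf format; the unshown tail of the function reads, to the best of my knowledge:

        exit:
                return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;

so MMIO mappings now come back as UC with IPAT clear, instead of trusting whatever type the guest asked for.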
@@ -9330,7 +9327,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                                vmcs_write64(APIC_ACCESS_ADDR,
                                  page_to_phys(vmx->nested.apic_access_page));
                } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
-                           (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
+                           cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
                        exec_control |=
                                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                        kvm_vcpu_reload_apic_access_page(vcpu);
@@ -10354,7 +10351,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .update_cr8_intercept = update_cr8_intercept,
        .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
        .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
-       .vm_has_apicv = vmx_vm_has_apicv,
+       .cpu_uses_apicv = vmx_cpu_uses_apicv,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
        .hwapic_irr_update = vmx_hwapic_irr_update,
        .hwapic_isr_update = vmx_hwapic_isr_update,
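Common-code users of the hook change in step, passing a vCPU instead of a VM; illustrative shape only (not quoted from the patch):

        if (kvm_x86_ops->cpu_uses_apicv(vcpu))
                ...     /* posted interrupts / virtual-interrupt delivery on */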
@@ -10411,7 +10408,7 @@ static int __init vmx_init(void)
        if (r)
                return r;
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
                           crash_vmclear_local_loaded_vmcss);
 #endif
@@ -10421,7 +10418,7 @@ static int __init vmx_init(void)
 
 static void __exit vmx_exit(void)
 {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
        synchronize_rcu();
 #endif