KVM: x86: fix CR8 handling
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1e7bb9c770840d5468cc06b0bccd74f8fc33a5e5..3d4b88af50f9db230fbfb0ba2a5a93d9ada61bae 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -99,8 +99,7 @@ struct nested_state {
 
        /* cache for intercepts of the guest */
        u32 intercept_cr;
-       u16 intercept_dr_read;
-       u16 intercept_dr_write;
+       u32 intercept_dr;
        u32 intercept_exceptions;
        u64 intercept;
 
@@ -186,6 +185,41 @@ static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
 
+enum {
+       VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
+                           pause filter count */
+       VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
+       VMCB_ASID,       /* ASID */
+       VMCB_INTR,       /* int_ctl, int_vector */
+       VMCB_NPT,        /* npt_en, nCR3, gPAT */
+       VMCB_CR,         /* CR0, CR3, CR4, EFER */
+       VMCB_DR,         /* DR6, DR7 */
+       VMCB_DT,         /* GDT, IDT */
+       VMCB_SEG,        /* CS, DS, SS, ES, CPL */
+       VMCB_CR2,        /* CR2 only */
+       VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
+       VMCB_DIRTY_MAX,
+};
+
+/* TPR and CR2 are always written before VMRUN */
+#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
+
+static inline void mark_all_dirty(struct vmcb *vmcb)
+{
+       vmcb->control.clean = 0;
+}
+
+static inline void mark_all_clean(struct vmcb *vmcb)
+{
+       vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+                              & ~VMCB_ALWAYS_DIRTY_MASK;
+}
+
+static inline void mark_dirty(struct vmcb *vmcb, int bit)
+{
+       vmcb->control.clean &= ~(1 << bit);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
        return container_of(vcpu, struct vcpu_svm, vcpu);
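
The enum and helpers above implement the VMCB state-caching ("clean bits") protocol: a set bit in control.clean tells the CPU that the corresponding group of VMCB fields is unchanged since the last VMRUN of this VMCB on this physical CPU and may be served from an on-chip cache. KVM therefore clears a bit whenever it writes to that group, marks everything dirty when the vCPU may have moved to another CPU, and sets all cacheable bits after each run. A standalone model of that lifecycle, illustrative only (simplified types; the real struct vmcb layout is fixed by hardware):

    #include <stdint.h>
    #include <stdio.h>

    enum { VMCB_INTR = 3, VMCB_CR = 5, VMCB_CR2 = 9, VMCB_DIRTY_MAX = 11 };
    #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))

    struct vmcb { uint32_t clean; uint64_t cr0; };

    static void mark_dirty(struct vmcb *v, int bit) { v->clean &= ~(1U << bit); }

    static void mark_all_clean(struct vmcb *v)
    {
            v->clean = ((1U << VMCB_DIRTY_MAX) - 1) & ~VMCB_ALWAYS_DIRTY_MASK;
    }

    int main(void)
    {
            struct vmcb v = { .clean = 0 };  /* freshly built VMCB: all dirty */

            mark_all_clean(&v);              /* done after every VMRUN */
            printf("after run:      clean=%#x\n", (unsigned)v.clean); /* 0x5f7 */

            v.cr0 |= 1;                      /* emulated CR0 write ...        */
            mark_dirty(&v, VMCB_CR);         /* ... dirties just the CR group */
            printf("after CR write: clean=%#x\n", (unsigned)v.clean); /* 0x5d7 */
            return 0;
    }
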
@@ -196,6 +230,8 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        struct vmcb_control_area *c, *h;
        struct nested_state *g;
 
+       mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
        if (!is_guest_mode(&svm->vcpu))
                return;
 
@@ -204,8 +240,7 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        g = &svm->nested;
 
        c->intercept_cr = h->intercept_cr | g->intercept_cr;
-       c->intercept_dr_read = h->intercept_dr_read | g->intercept_dr_read;
-       c->intercept_dr_write = h->intercept_dr_write | g->intercept_dr_write;
+       c->intercept_dr = h->intercept_dr | g->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
 }
@@ -243,6 +278,60 @@ static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
        return vmcb->control.intercept_cr & (1U << bit);
 }
 
+static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept_dr |= (1U << bit);
+
+       recalc_intercepts(svm);
+}
+
+static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept_dr &= ~(1U << bit);
+
+       recalc_intercepts(svm);
+}
+
+static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept_exceptions |= (1U << bit);
+
+       recalc_intercepts(svm);
+}
+
+static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept_exceptions &= ~(1U << bit);
+
+       recalc_intercepts(svm);
+}
+
+static inline void set_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept |= (1ULL << bit);
+
+       recalc_intercepts(svm);
+}
+
+static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+{
+       struct vmcb *vmcb = get_host_vmcb(svm);
+
+       vmcb->control.intercept &= ~(1ULL << bit);
+
+       recalc_intercepts(svm);
+}
+
 static inline void enable_gif(struct vcpu_svm *svm)
 {
        svm->vcpu.arch.hflags |= HF_GIF_MASK;
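
All of the set_*/clr_* helpers above share one pattern: they modify the host copy of an intercept vector (the hsave area while a nested guest runs, the active VMCB otherwise), and recalc_intercepts() then ORs the L1 guest's cached vector back in, so an intercept stays armed if either KVM or the L1 hypervisor wants it. A minimal standalone model of the pattern, with the dirty-bit bookkeeping omitted (simplified structs, not the kernel types):

    #include <stdint.h>
    #include <stdio.h>

    struct control { uint32_t intercept_dr; };
    struct svm {
            int guest_mode;
            struct control vmcb;    /* active vector, what the CPU sees */
            struct control hsave;   /* host state saved across VMRUN    */
            struct control nested;  /* cached L1 intercept vector       */
    };

    static struct control *host(struct svm *s)
    {
            return s->guest_mode ? &s->hsave : &s->vmcb;
    }

    static void recalc(struct svm *s)
    {
            if (!s->guest_mode)
                    return;         /* host copy == active copy */
            s->vmcb.intercept_dr = s->hsave.intercept_dr | s->nested.intercept_dr;
    }

    static void set_dr_intercept(struct svm *s, int bit)
    {
            host(s)->intercept_dr |= 1U << bit;
            recalc(s);
    }

    int main(void)
    {
            struct svm s = { .guest_mode = 1 };

            s.nested.intercept_dr = 1U << 0; /* L1 wants DR0 read exits   */
            set_dr_intercept(&s, 16 + 7);    /* KVM wants DR7 write exits */
            printf("active intercept_dr = %#010x\n", (unsigned)s.vmcb.intercept_dr);
            return 0;                        /* prints 0x00800001 */
    }
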
@@ -332,16 +421,6 @@ static inline void invlpga(unsigned long addr, u32 asid)
        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
-static inline void force_new_asid(struct kvm_vcpu *vcpu)
-{
-       to_svm(vcpu)->asid_generation--;
-}
-
-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
-{
-       force_new_asid(vcpu);
-}
-
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
@@ -358,6 +437,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
                efer &= ~EFER_LME;
 
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+       mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
@@ -780,6 +860,8 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        }
 
        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+
+       mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -789,6 +871,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
        svm->vmcb->control.tsc_offset += adjustment;
        if (is_guest_mode(vcpu))
                svm->nested.hsave->control.tsc_offset += adjustment;
+       mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void init_vmcb(struct vcpu_svm *svm)
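
Background on the two TSC hunks above: the guest observes TSC as host TSC plus the offset in the active VMCB, and while an L2 guest runs that offset is the sum of KVM's L1 offset and the delta L1 programmed for its own guest. That is why svm_write_tsc_offset() preserves g_tsc_offset when retargeting the L1 offset, and why both writers now dirty VMCB_INTERCEPTS, the clean-bit group that covers tsc_offset. Worked numbers (illustrative values only, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t host_tsc  = 1000000;
            int64_t  l1_offset = -400000;  /* offset KVM gives the L1 guest   */
            int64_t  l2_delta  = -100000;  /* extra offset L1 gives its guest */

            /* while L2 runs, the active VMCB carries the combined offset */
            int64_t active = l1_offset + l2_delta;

            printf("L1 reads TSC = %llu\n",
                   (unsigned long long)(host_tsc + l1_offset)); /* 600000 */
            printf("L2 reads TSC = %llu\n",
                   (unsigned long long)(host_tsc + active));    /* 500000 */
            return 0;
    }
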
@@ -807,52 +890,52 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
-       control->intercept_dr_read =    INTERCEPT_DR0_MASK |
-                                       INTERCEPT_DR1_MASK |
-                                       INTERCEPT_DR2_MASK |
-                                       INTERCEPT_DR3_MASK |
-                                       INTERCEPT_DR4_MASK |
-                                       INTERCEPT_DR5_MASK |
-                                       INTERCEPT_DR6_MASK |
-                                       INTERCEPT_DR7_MASK;
-
-       control->intercept_dr_write =   INTERCEPT_DR0_MASK |
-                                       INTERCEPT_DR1_MASK |
-                                       INTERCEPT_DR2_MASK |
-                                       INTERCEPT_DR3_MASK |
-                                       INTERCEPT_DR4_MASK |
-                                       INTERCEPT_DR5_MASK |
-                                       INTERCEPT_DR6_MASK |
-                                       INTERCEPT_DR7_MASK;
-
-       control->intercept_exceptions = (1 << PF_VECTOR) |
-                                       (1 << UD_VECTOR) |
-                                       (1 << MC_VECTOR);
-
-
-       control->intercept =    (1ULL << INTERCEPT_INTR) |
-                               (1ULL << INTERCEPT_NMI) |
-                               (1ULL << INTERCEPT_SMI) |
-                               (1ULL << INTERCEPT_SELECTIVE_CR0) |
-                               (1ULL << INTERCEPT_CPUID) |
-                               (1ULL << INTERCEPT_INVD) |
-                               (1ULL << INTERCEPT_HLT) |
-                               (1ULL << INTERCEPT_INVLPG) |
-                               (1ULL << INTERCEPT_INVLPGA) |
-                               (1ULL << INTERCEPT_IOIO_PROT) |
-                               (1ULL << INTERCEPT_MSR_PROT) |
-                               (1ULL << INTERCEPT_TASK_SWITCH) |
-                               (1ULL << INTERCEPT_SHUTDOWN) |
-                               (1ULL << INTERCEPT_VMRUN) |
-                               (1ULL << INTERCEPT_VMMCALL) |
-                               (1ULL << INTERCEPT_VMLOAD) |
-                               (1ULL << INTERCEPT_VMSAVE) |
-                               (1ULL << INTERCEPT_STGI) |
-                               (1ULL << INTERCEPT_CLGI) |
-                               (1ULL << INTERCEPT_SKINIT) |
-                               (1ULL << INTERCEPT_WBINVD) |
-                               (1ULL << INTERCEPT_MONITOR) |
-                               (1ULL << INTERCEPT_MWAIT);
+       set_dr_intercept(svm, INTERCEPT_DR0_READ);
+       set_dr_intercept(svm, INTERCEPT_DR1_READ);
+       set_dr_intercept(svm, INTERCEPT_DR2_READ);
+       set_dr_intercept(svm, INTERCEPT_DR3_READ);
+       set_dr_intercept(svm, INTERCEPT_DR4_READ);
+       set_dr_intercept(svm, INTERCEPT_DR5_READ);
+       set_dr_intercept(svm, INTERCEPT_DR6_READ);
+       set_dr_intercept(svm, INTERCEPT_DR7_READ);
+
+       set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
+       set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
+
+       set_exception_intercept(svm, PF_VECTOR);
+       set_exception_intercept(svm, UD_VECTOR);
+       set_exception_intercept(svm, MC_VECTOR);
+
+       set_intercept(svm, INTERCEPT_INTR);
+       set_intercept(svm, INTERCEPT_NMI);
+       set_intercept(svm, INTERCEPT_SMI);
+       set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+       set_intercept(svm, INTERCEPT_CPUID);
+       set_intercept(svm, INTERCEPT_INVD);
+       set_intercept(svm, INTERCEPT_HLT);
+       set_intercept(svm, INTERCEPT_INVLPG);
+       set_intercept(svm, INTERCEPT_INVLPGA);
+       set_intercept(svm, INTERCEPT_IOIO_PROT);
+       set_intercept(svm, INTERCEPT_MSR_PROT);
+       set_intercept(svm, INTERCEPT_TASK_SWITCH);
+       set_intercept(svm, INTERCEPT_SHUTDOWN);
+       set_intercept(svm, INTERCEPT_VMRUN);
+       set_intercept(svm, INTERCEPT_VMMCALL);
+       set_intercept(svm, INTERCEPT_VMLOAD);
+       set_intercept(svm, INTERCEPT_VMSAVE);
+       set_intercept(svm, INTERCEPT_STGI);
+       set_intercept(svm, INTERCEPT_CLGI);
+       set_intercept(svm, INTERCEPT_SKINIT);
+       set_intercept(svm, INTERCEPT_WBINVD);
+       set_intercept(svm, INTERCEPT_MONITOR);
+       set_intercept(svm, INTERCEPT_MWAIT);
+       set_intercept(svm, INTERCEPT_XSETBV);
 
        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
@@ -903,25 +986,27 @@ static void init_vmcb(struct vcpu_svm *svm)
        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
-               control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
-                                       (1ULL << INTERCEPT_INVLPG));
-               control->intercept_exceptions &= ~(1 << PF_VECTOR);
+               clr_intercept(svm, INTERCEPT_TASK_SWITCH);
+               clr_intercept(svm, INTERCEPT_INVLPG);
+               clr_exception_intercept(svm, PF_VECTOR);
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = 0x0007040600070406ULL;
                save->cr3 = 0;
                save->cr4 = 0;
        }
-       force_new_asid(&svm->vcpu);
+       svm->asid_generation = 0;
 
        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;
 
        if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                control->pause_filter_count = 3000;
-               control->intercept |= (1ULL << INTERCEPT_PAUSE);
+               set_intercept(svm, INTERCEPT_PAUSE);
        }
 
+       mark_all_dirty(svm->vmcb);
+
        enable_gif(svm);
 }
 
@@ -1038,6 +1123,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if (unlikely(cpu != vcpu->cpu)) {
                svm->asid_generation = 0;
+               mark_all_dirty(svm->vmcb);
        }
 
 #ifdef CONFIG_X86_64
@@ -1093,12 +1179,12 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
-       svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+       set_intercept(svm, INTERCEPT_VINTR);
 }
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+       clr_intercept(svm, INTERCEPT_VINTR);
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1213,6 +1299,7 @@ static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 
        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address ;
+       mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1229,6 +1316,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 
        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address ;
+       mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -1250,6 +1338,7 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
                *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                        | (gcr0 & SVM_CR0_SELECTIVE_MASK);
 
+       mark_dirty(svm->vmcb, VMCB_CR);
 
        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
                clr_cr_intercept(svm, INTERCEPT_CR0_READ);
@@ -1316,6 +1405,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
+       mark_dirty(svm->vmcb, VMCB_CR);
        update_cr0_intercept(svm);
 }
 
@@ -1325,13 +1415,14 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-               force_new_asid(vcpu);
+               svm_flush_tlb(vcpu);
 
        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
+       mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -1360,26 +1451,25 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
+       mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
 static void update_db_intercept(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->control.intercept_exceptions &=
-               ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+       clr_exception_intercept(svm, DB_VECTOR);
+       clr_exception_intercept(svm, BP_VECTOR);
 
        if (svm->nmi_singlestep)
-               svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+               set_exception_intercept(svm, DB_VECTOR);
 
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       svm->vmcb->control.intercept_exceptions |=
-                               1 << DB_VECTOR;
+                       set_exception_intercept(svm, DB_VECTOR);
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-                       svm->vmcb->control.intercept_exceptions |=
-                               1 << BP_VECTOR;
+                       set_exception_intercept(svm, BP_VECTOR);
        } else
                vcpu->guest_debug = 0;
 }
@@ -1393,6 +1483,8 @@ static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;
 
+       mark_dirty(svm->vmcb, VMCB_DR);
+
        update_db_intercept(vcpu);
 }
 
@@ -1406,6 +1498,8 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 
        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;
+
+       mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
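
Background on the ASID handling: each physical CPU hands out ASIDs monotonically and keeps a generation counter, and a vCPU whose cached generation does not match gets a fresh ASID from new_asid() in pre_svm_run(). Flushing the guest TLB on hardware without flush-by-ASID is then just a matter of invalidating the vCPU's generation, which is what svm_flush_tlb() falls back to further down. A standalone model of the bookkeeping (simplified; the real code must also flush the TLB when the ASID space wraps):

    #include <stdint.h>
    #include <stdio.h>

    struct cpu_data { uint32_t asid_generation, max_asid, next_asid; };
    struct vcpu     { uint32_t asid_generation, asid; };

    static void new_asid(struct vcpu *v, struct cpu_data *sd)
    {
            if (sd->next_asid > sd->max_asid) {
                    ++sd->asid_generation;  /* every vCPU's tag goes stale */
                    sd->next_asid = 1;      /* (real code flushes the TLB here) */
            }
            v->asid_generation = sd->asid_generation;
            v->asid = sd->next_asid++;
    }

    int main(void)
    {
            struct cpu_data sd = { .asid_generation = 1, .max_asid = 8, .next_asid = 1 };
            struct vcpu v = { 0 };

            new_asid(&v, &sd);
            printf("asid=%u gen=%u\n", v.asid, v.asid_generation);

            v.asid_generation--;            /* svm_flush_tlb() without FLUSHBYASID */
            if (v.asid_generation != sd.asid_generation)
                    new_asid(&v, &sd);      /* pre_svm_run() picks a new ASID */
            printf("asid=%u gen=%u\n", v.asid, v.asid_generation);
            return 0;
    }
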
@@ -1413,6 +1507,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->save.dr7 = value;
+       mark_dirty(svm->vmcb, VMCB_DR);
 }
 
 static int pf_interception(struct vcpu_svm *svm)
@@ -1500,21 +1595,8 @@ static int ud_interception(struct vcpu_svm *svm)
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       u32 excp;
-
-       if (is_guest_mode(vcpu)) {
-               u32 h_excp, n_excp;
-
-               h_excp  = svm->nested.hsave->control.intercept_exceptions;
-               n_excp  = svm->nested.intercept_exceptions;
-               h_excp &= ~(1 << NM_VECTOR);
-               excp    = h_excp | n_excp;
-       } else {
-               excp  = svm->vmcb->control.intercept_exceptions;
-               excp &= ~(1 << NM_VECTOR);
-       }
 
-       svm->vmcb->control.intercept_exceptions = excp;
+       clr_exception_intercept(svm, NM_VECTOR);
 
        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
@@ -1675,7 +1757,8 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->control.nested_cr3 = root;
-       force_new_asid(vcpu);
+       mark_dirty(svm->vmcb, VMCB_NPT);
+       svm_flush_tlb(vcpu);
 }
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -1923,15 +2006,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
-       case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
-               u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
-               if (svm->nested.intercept_dr_read & dr_bits)
-                       vmexit = NESTED_EXIT_DONE;
-               break;
-       }
-       case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
-               u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
-               if (svm->nested.intercept_dr_write & dr_bits)
+       case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
+               u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
+               if (svm->nested.intercept_dr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
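
Folding the two u16 DR vectors into a single u32, as this hunk relies on, works because of the SVM exit-code layout: DR read exits start at 0x20 and DR write exits at 0x30, so exit_code - SVM_EXIT_READ_DR0 yields 0-7 for reads and 16-23 for writes, exactly the low and high halves of the combined word. A quick demonstration (exit-code values per the SVM spec):

    #include <stdint.h>
    #include <stdio.h>

    #define SVM_EXIT_READ_DR0  0x020
    #define SVM_EXIT_WRITE_DR0 0x030

    static uint32_t dr_intercept_bit(uint32_t exit_code)
    {
            return 1U << (exit_code - SVM_EXIT_READ_DR0);
    }

    int main(void)
    {
            printf("read  DR4 -> %#010x\n", dr_intercept_bit(SVM_EXIT_READ_DR0 + 4));  /* bit 4  */
            printf("write DR4 -> %#010x\n", dr_intercept_bit(SVM_EXIT_WRITE_DR0 + 4)); /* bit 20 */
            return 0;
    }
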
@@ -1977,8 +2054,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
        struct vmcb_control_area *from = &from_vmcb->control;
 
        dst->intercept_cr         = from->intercept_cr;
-       dst->intercept_dr_read    = from->intercept_dr_read;
-       dst->intercept_dr_write   = from->intercept_dr_write;
+       dst->intercept_dr         = from->intercept_dr;
        dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept            = from->intercept;
        dst->iopm_base_pa         = from->iopm_base_pa;
@@ -2111,6 +2187,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;
 
+       mark_all_dirty(svm->vmcb);
+
        nested_svm_unmap(page);
 
        nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2280,12 +2358,11 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
        /* cache intercepts */
        svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
-       svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
-       svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
+       svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;
 
-       force_new_asid(&svm->vcpu);
+       svm_flush_tlb(&svm->vcpu);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -2299,7 +2376,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        }
 
        /* We don't want to see VMMCALLs from a nested guest */
-       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+       clr_intercept(svm, INTERCEPT_VMMCALL);
 
        svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
@@ -2323,6 +2400,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
        enable_gif(svm);
 
+       mark_all_dirty(svm->vmcb);
+
        return true;
 }
 
@@ -2440,6 +2519,8 @@ static int clgi_interception(struct vcpu_svm *svm)
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 
+       mark_dirty(svm->vmcb, VMCB_INTR);
+
        return 1;
 }
 
@@ -2466,6 +2547,19 @@ static int skinit_interception(struct vcpu_svm *svm)
        return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+       u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+       u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+       if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+               svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+               skip_emulated_instruction(&svm->vcpu);
+       }
+
+       return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
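
xsetbv_interception() above decodes the instruction by hand: XSETBV takes the XCR index in ECX and the new 64-bit value split across EDX:EAX, and because its encoding (0f 01 d1) is always three bytes, next_rip can be computed without running the emulator. A small sketch of the operand handling (illustrative; kvm_read_edx_eax() is the kernel helper doing the same combine):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t read_edx_eax(uint32_t edx, uint32_t eax)
    {
            return ((uint64_t)edx << 32) | eax;
    }

    int main(void)
    {
            uint32_t ecx = 0;               /* XCR index; only XCR0 is defined */
            uint32_t eax = 0x7, edx = 0x0;  /* x87 | SSE | AVX                 */

            printf("xcr%u <- %#llx\n", ecx,
                   (unsigned long long)read_edx_eax(edx, eax));
            /* XSETBV encodes as 0f 01 d1: three bytes, hence rip + 3 */
            return 0;
    }
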
@@ -2547,7 +2641,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
 static int iret_interception(struct vcpu_svm *svm)
 {
        ++svm->vcpu.stat.nmi_window_exits;
-       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+       clr_intercept(svm, INTERCEPT_IRET);
        svm->vcpu.arch.hflags |= HF_IRET_MASK;
        return 1;
 }
@@ -2582,16 +2676,17 @@ static int cr0_write_interception(struct vcpu_svm *svm)
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       int r;
 
        u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
        /* instruction emulation calls kvm_set_cr8() */
-       emulate_instruction(&svm->vcpu, 0, 0, 0);
+       r = emulate_instruction(&svm->vcpu, 0, 0, 0);
        if (irqchip_in_kernel(svm->vcpu.kvm)) {
                clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
-               return 1;
+               return r == EMULATE_DONE;
        }
        if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-               return 1;
+               return r == EMULATE_DONE;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
 }
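
This hunk is the CR8 fix the commit is named for: emulate_instruction() can fail, and the old handler returned 1 regardless, silently swallowing the error. An exit handler returns nonzero to keep the vCPU running in the kernel and 0 to drop back to userspace, so propagating r == EMULATE_DONE turns an emulation failure into a visible userspace exit. A reduced model of that contract (simplified; the real handler also exits with KVM_EXIT_SET_TPR when a userspace irqchip lowered the TPR):

    #include <stdio.h>

    enum { EMULATE_DONE, EMULATE_DO_MMIO, EMULATE_FAIL };

    /* 1 = handled, keep running in the kernel; 0 = return to userspace */
    static int cr8_write_exit(int r)
    {
            return r == EMULATE_DONE;  /* the pre-fix code always returned 1 */
    }

    int main(void)
    {
            printf("emulation ok:     %d\n", cr8_write_exit(EMULATE_DONE)); /* 1 */
            printf("emulation failed: %d\n", cr8_write_exit(EMULATE_FAIL)); /* 0 */
            return 0;
    }
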
@@ -2602,14 +2697,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
        switch (ecx) {
        case MSR_IA32_TSC: {
-               u64 tsc_offset;
-
-               if (is_guest_mode(vcpu))
-                       tsc_offset = svm->nested.hsave->control.tsc_offset;
-               else
-                       tsc_offset = svm->vmcb->control.tsc_offset;
+               struct vmcb *vmcb = get_host_vmcb(svm);
 
-               *data = tsc_offset + native_read_tsc();
+               *data = vmcb->control.tsc_offset + native_read_tsc();
                break;
        }
        case MSR_STAR:
@@ -2763,6 +2853,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                        return 1;
 
                svm->vmcb->save.dbgctl = data;
+               mark_dirty(svm->vmcb, VMCB_LBR);
                if (data & (1ULL<<0))
                        svm_enable_lbrv(svm);
                else
@@ -2815,6 +2906,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+       mark_dirty(svm->vmcb, VMCB_INTR);
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
@@ -2894,6 +2986,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
        [SVM_EXIT_MONITOR]                      = invalid_op_interception,
        [SVM_EXIT_MWAIT]                        = invalid_op_interception,
+       [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = pf_interception,
 };
 
@@ -2906,8 +2999,8 @@ void dump_vmcb(struct kvm_vcpu *vcpu)
        pr_err("VMCB Control Area:\n");
        pr_err("cr_read:            %04x\n", control->intercept_cr & 0xffff);
        pr_err("cr_write:           %04x\n", control->intercept_cr >> 16);
-       pr_err("dr_read:            %04x\n", control->intercept_dr_read);
-       pr_err("dr_write:           %04x\n", control->intercept_dr_write);
+       pr_err("dr_read:            %04x\n", control->intercept_dr & 0xffff);
+       pr_err("dr_write:           %04x\n", control->intercept_dr >> 16);
        pr_err("exceptions:         %08x\n", control->intercept_exceptions);
        pr_err("intercepts:         %016llx\n", control->intercept);
        pr_err("pause filter count: %d\n", control->pause_filter_count);
@@ -3081,7 +3174,6 @@ static void pre_svm_run(struct vcpu_svm *svm)
 
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
@@ -3093,7 +3185,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 
        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
-       svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+       set_intercept(svm, INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
 }
 
@@ -3106,6 +3198,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+       mark_dirty(svm->vmcb, VMCB_INTR);
 }
 
 static void svm_set_irq(struct kvm_vcpu *vcpu)
@@ -3160,10 +3253,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
        if (masked) {
                svm->vcpu.arch.hflags |= HF_NMI_MASK;
-               svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+               set_intercept(svm, INTERCEPT_IRET);
        } else {
                svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-               svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+               clr_intercept(svm, INTERCEPT_IRET);
        }
 }
 
@@ -3225,7 +3318,12 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       force_new_asid(vcpu);
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+               svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+       else
+               svm->asid_generation--;
 }
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
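
svm_flush_tlb() above now prefers the flush-by-ASID facility: setting tlb_ctl to TLB_CONTROL_FLUSH_ASID asks the next VMRUN to flush only this guest's ASID, keeping host TLB entries intact, while CPUs without the feature fall back to retiring the ASID via the generation trick. The companion hunk below resets tlb_ctl to TLB_CONTROL_DO_NOTHING after every run, making a flush request one-shot. For reference, a userspace sketch of the feature detection (bit positions taken from the AMD APM's SVM feature leaf, CPUID Fn8000_000A EDX; treat them as assumptions to verify against the manual):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID Fn8000_000A reports the SVM sub-features (AMD only) */
            if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx)) {
                    puts("no SVM feature leaf");
                    return 0;
            }
            printf("VMCB clean bits: %s\n", (edx & (1u << 5)) ? "yes" : "no");
            printf("flush by ASID:   %s\n", (edx & (1u << 6)) ? "yes" : "no");
            return 0;
    }
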
@@ -3451,6 +3549,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        svm->next_rip = 0;
 
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
        /* if exit due to PF check for async PF */
        if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
                svm->apf_reason = kvm_read_and_reset_pf_reason();
@@ -3467,6 +3567,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code ==
                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
                svm_handle_mce(svm);
+
+       mark_all_clean(svm->vmcb);
 }
 
 #undef R
@@ -3476,7 +3578,8 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->save.cr3 = root;
-       force_new_asid(vcpu);
+       mark_dirty(svm->vmcb, VMCB_CR);
+       svm_flush_tlb(vcpu);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3484,11 +3587,13 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->control.nested_cr3 = root;
+       mark_dirty(svm->vmcb, VMCB_NPT);
 
        /* Also sync guest cr3 here in case we live migrate */
        svm->vmcb->save.cr3 = vcpu->arch.cr3;
+       mark_dirty(svm->vmcb, VMCB_CR);
 
-       force_new_asid(vcpu);
+       svm_flush_tlb(vcpu);
 }
 
 static int is_disabled(void)
@@ -3535,10 +3640,6 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
        switch (func) {
-       case 0x00000001:
-               /* Mask out xsave bit as long as it is not supported by SVM */
-               entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-               break;
        case 0x80000001:
                if (nested)
                        entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3612,6 +3713,7 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
        { SVM_EXIT_WBINVD,                      "wbinvd" },
        { SVM_EXIT_MONITOR,                     "monitor" },
        { SVM_EXIT_MWAIT,                       "mwait" },
+       { SVM_EXIT_XSETBV,                      "xsetbv" },
        { SVM_EXIT_NPF,                         "npf" },
        { -1, NULL }
 };
@@ -3635,9 +3737,7 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
-       if (is_guest_mode(vcpu))
-               svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+       set_exception_intercept(svm, NM_VECTOR);
        update_cr0_intercept(svm);
 }
 