KVM: x86: check for cr3 validity in mmu_alloc_roots
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index da3e65bfc6ee9e2919661a5e7c86ace1000b969c..3244437e67b35aa61dddfc84b944a0a565cb4435 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1247,44 +1247,53 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        entry->flags = 0;
 }
 
+#define F(x) bit(X86_FEATURE_##x)
+
 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                         u32 index, int *nent, int maxnent)
 {
-       const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
-               bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
-               bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
-               bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
-               bit(X86_FEATURE_MCE) |
-               bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
-               bit(X86_FEATURE_SEP) | bit(X86_FEATURE_MTRR) |
-               bit(X86_FEATURE_PGE) | bit(X86_FEATURE_MCA) |
-               bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PAT) |
-               bit(X86_FEATURE_PSE36) |
-               bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
-               bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
-               bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
-       const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
-               bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
-               bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
-               bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
-               bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
-               bit(X86_FEATURE_PGE) |
-               bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
-               bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
-               bit(X86_FEATURE_SYSCALL) |
-               (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
+       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
-               bit(X86_FEATURE_LM) |
+       unsigned f_lm = F(LM);
+#else
+       unsigned f_lm = 0;
 #endif
-               bit(X86_FEATURE_FXSR_OPT) |
-               bit(X86_FEATURE_MMXEXT) |
-               bit(X86_FEATURE_3DNOWEXT) |
-               bit(X86_FEATURE_3DNOW);
-       const u32 kvm_supported_word3_x86_features =
-               bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
+
+       /* cpuid 1.edx */
+       const u32 kvm_supported_word0_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+               0 /* Reserved, DS, ACPI */ | F(MMX) |
+               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+               0 /* HTT, TM, Reserved, PBE */;
+       /* cpuid 0x80000001.edx */
+       const u32 kvm_supported_word1_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* Reserved */ |
+               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
+               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
+       /* cpuid 1.ecx */
+       const u32 kvm_supported_word4_x86_features =
+               F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
+               0 /* DS-CPL, VMX, SMX, EST */ |
+               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+               0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
+               0 /* Reserved, DCA */ | F(XMM4_1) |
+               F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
+               0 /* Reserved, XSAVE, OSXSAVE */;
+       /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
-               bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
-               bit(X86_FEATURE_SVM);
+               F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
+               0 /* SKINIT */ | 0 /* WDT */;
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
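
Note: the new F(x) macro compacts the long bit(X86_FEATURE_*) chains and is
#undef'd again once do_cpuid_ent() is done with it. It leans on the kernel's
cpufeature numbering, where each constant encodes (word * 32 + bit) and each
word is one 32-bit CPUID register. A minimal user-space sketch of the
expansion, assuming bit() is the helper from arch/x86/kvm/x86.h of this era
(1 << (bitno & 31)); the two feature constants are copied here only for
illustration:

	#include <stdio.h>

	/* Stand-ins for the kernel's constants: (word * 32 + bit). */
	#define X86_FEATURE_FPU (0*32 + 0)	/* CPUID.1:EDX bit 0  */
	#define X86_FEATURE_SEP (0*32 + 11)	/* CPUID.1:EDX bit 11 */

	/* The low five bits select the position within the 32-bit word. */
	static unsigned int bit(int bitno)
	{
		return 1u << (bitno & 31);
	}

	#define F(x) bit(X86_FEATURE_##x)

	int main(void)
	{
		printf("F(FPU)=0x%x F(SEP)=0x%x\n", F(FPU), F(SEP));
		return 0;
	}
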
@@ -1297,7 +1306,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
-               entry->ecx &= kvm_supported_word3_x86_features;
+               entry->ecx &= kvm_supported_word4_x86_features;
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
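
Note: the rename from kvm_supported_word3_x86_features to
kvm_supported_word4_x86_features matches the kernel's cpufeature word map,
in which word 3 holds Linux-defined bits rather than a CPUID register. The
relevant rows, sketched as a comment (per arch/x86/include/asm/cpufeature.h
of this era):

	/* word 0: CPUID.1:EDX              word 1: CPUID.0x80000001:EDX
	 * word 3: Linux-defined bits       word 4: CPUID.1:ECX
	 * word 6: CPUID.0x80000001:ECX
	 * so the mask ANDed into entry->ecx for function 1 is a word 4 mask.
	 */
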
@@ -1359,6 +1368,8 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        put_cpu();
 }
 
+#undef F
+
 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
 {
@@ -1614,10 +1625,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                return -EINVAL;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+       spin_unlock(&kvm->mmu_lock);
        up_write(&kvm->slots_lock);
        return 0;
 }
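
Note: this hunk, and the two like it below (the dirty-log path and
kvm_arch_set_memory_region()), take kvm->mmu_lock around the mmu-page
accounting. slots_lock only serializes ioctls against each other, while
kvm_mmu_change_mmu_pages() and kvm_mmu_slot_remove_write_access() walk
shadow-page state that the page-fault path modifies under mmu_lock. The
resulting lock ordering, as an illustrative sketch:

	down_write(&kvm->slots_lock);	/* outer: memslot layout     */
	spin_lock(&kvm->mmu_lock);	/* inner: shadow-page state  */
	/* ... kvm_mmu_change_mmu_pages(), remove_write_access() ... */
	spin_unlock(&kvm->mmu_lock);
	up_write(&kvm->slots_lock);
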
@@ -1793,7 +1806,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -2368,7 +2383,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                        u16 error_code,
                        int emulation_type)
 {
-       int r;
+       int r, shadow_mask;
        struct decode_cache *c;
 
        kvm_clear_exception_queue(vcpu);
@@ -2422,6 +2437,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        }
 
        r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
+       shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
+
+       if (r == 0)
+               kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
 
        if (vcpu->arch.pio.string)
                return EMULATE_DO_MMIO;
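
Note: on successful emulation (r == 0) the interruptibility state computed
by the emulator is written back through the new set_interrupt_shadow() hook,
so an emulated mov ss or sti still blocks interrupts over the following
instruction. The mask bits are assumed here from the companion patches in
this series (they are not shown in this hunk):

	/* Assumed definitions, for illustration only: */
	#define KVM_X86_SHADOW_INT_MOV_SS 0x01	/* shadow after mov/pop ss */
	#define KVM_X86_SHADOW_INT_STI    0x02	/* shadow after sti        */

	/* x86_emulate_insn() leaves either bit (or zero for "no shadow")
	 * in vcpu->arch.emulate_ctxt.interruptibility. */
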
@@ -3167,7 +3186,7 @@ static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_run->request_interrupt_window;
 
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               kvm_x86_ops->drop_interrupt_shadow(vcpu);
+               kvm_x86_ops->set_interrupt_shadow(vcpu, 0);
 
        inject_irq(vcpu);
 
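
Note: with the hook taking an explicit mask, the single-purpose
drop_interrupt_shadow() callback disappears; clearing the shadow is simply
the zero mask. Restated with comments:

	/* Single-stepping must not stall inside an interrupt shadow, so
	 * the shadow is force-cleared before any injection: */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		kvm_x86_ops->set_interrupt_shadow(vcpu, 0);	/* 0: none */
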
@@ -3219,6 +3238,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        local_irq_disable();
 
+       clear_bit(KVM_REQ_KICK, &vcpu->requests);
+       smp_mb__after_clear_bit();
+
        if (vcpu->requests || need_resched() || signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
@@ -3226,13 +3248,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                goto out;
        }
 
-       vcpu->guest_mode = 1;
-       /*
-        * Make sure that guest_mode assignment won't happen after
-        * testing the pending IRQ vector bitmap.
-        */
-       smp_wmb();
-
        if (vcpu->arch.exception.pending)
                __queue_exception(vcpu);
        else
@@ -3277,7 +3292,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        set_debugreg(vcpu->arch.host_dr6, 6);
        set_debugreg(vcpu->arch.host_dr7, 7);
 
-       vcpu->guest_mode = 0;
+       set_bit(KVM_REQ_KICK, &vcpu->requests);
        local_irq_enable();
 
        ++vcpu->stat.exits;
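
Note: these three hunks replace the vcpu->guest_mode flag and its smp_wmb()
pairing with a request bit. KVM_REQ_KICK is clear exactly while the vcpu is
in (or committed to entering) guest mode: it is cleared with a full barrier
before vcpu->requests is tested, and set again on every exit. A kicker that
races in after the clear either makes the requests test fail, aborting the
entry, or leaves a pending reschedule IPI that forces an immediate VM exit.
Condensed sketch of the entry/exit side:

	/* vcpu entry path (condensed from vcpu_enter_guest above): */
	local_irq_disable();
	clear_bit(KVM_REQ_KICK, &vcpu->requests);	/* "in guest mode"   */
	smp_mb__after_clear_bit();			/* clear before test */
	if (vcpu->requests || need_resched() || signal_pending(current)) {
		local_irq_enable();			/* kicked: retry     */
		goto out;
	}
	/* ... run the guest; an IPI arriving now exits it immediately ... */
	set_bit(KVM_REQ_KICK, &vcpu->requests);		/* back in host mode */
	local_irq_enable();
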
@@ -4537,12 +4552,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                }
        }
 
+       spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+       spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
 
        return 0;
@@ -4551,6 +4568,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
        kvm_mmu_zap_all(kvm);
+       kvm_reload_remote_mmus(kvm);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
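
Note: after kvm_mmu_zap_all() frees every shadow page, a remote vcpu could
still be holding a root pointer into the zapped set, so the new
kvm_reload_remote_mmus() call raises KVM_REQ_MMU_RELOAD on all vcpus and
each rebuilds its root before re-entering the guest. Restated with comments:

	void kvm_arch_flush_shadow(struct kvm *kvm)
	{
		kvm_mmu_zap_all(kvm);		/* free all shadow pages  */
		kvm_reload_remote_mmus(kvm);	/* no vcpu keeps its root */
	}
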
@@ -4560,30 +4578,20 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
               || vcpu->arch.nmi_pending;
 }
 
-static void vcpu_kick_intr(void *info)
-{
-#ifdef DEBUG
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
-       printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
-#endif
-}
-
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-       int ipi_pcpu = vcpu->cpu;
-       int cpu;
+       int me;
+       int cpu = vcpu->cpu;
 
        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }
-       /*
-        * We may be called synchronously with irqs disabled in guest mode,
-        * So need not to call smp_call_function_single() in that case.
-        */
-       cpu = get_cpu();
-       if (vcpu->guest_mode && vcpu->cpu != cpu)
-               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+
+       me = get_cpu();
+       if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+               if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+                       smp_send_reschedule(cpu);
        put_cpu();
 }
 
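
Note: the kick side pairs with the entry-path sketch above. The old trick of
an empty smp_call_function_single() handler is gone; a plain reschedule IPI
is enough to bounce a running guest out of guest mode, and test_and_set_bit()
both suppresses duplicate IPIs and skips vcpus already in host mode (where
the bit is set). The (unsigned)cpu range and cpu_online() checks guard
against vcpu->cpu being stale or -1 for a vcpu that has never run. A
hypothetical caller, for illustration only:

	/* Make a pending event visible, then kick the target so it
	 * re-evaluates vcpu->requests promptly (example request bit): */
	set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
	kvm_vcpu_kick(vcpu);	/* IPIs only a vcpu in guest mode */
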