KVM: remove export of emulator_write_emulated()
[deliverable/linux.git] / arch / x86 / kvm / x86.c
index 64c6e7a31411b1f98ef6f07929232742c466def9..15a4b754a451995ff03c7623b94d1bacb90214b7 100644 (file)
@@ -573,7 +573,7 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
-int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
        switch (dr) {
        case 0 ... 3:
@@ -582,29 +582,21 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
                        vcpu->arch.eff_db[dr] = val;
                break;
        case 4:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                       return 1; /* #UD */
                /* fall through */
        case 6:
-               if (val & 0xffffffff00000000ULL) {
-                       kvm_inject_gp(vcpu, 0);
-                       return 1;
-               }
+               if (val & 0xffffffff00000000ULL)
+                       return -1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
                break;
        case 5:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+                       return 1; /* #UD */
                /* fall through */
        default: /* 7 */
-               if (val & 0xffffffff00000000ULL) {
-                       kvm_inject_gp(vcpu, 0);
-                       return 1;
-               }
+               if (val & 0xffffffff00000000ULL)
+                       return -1; /* #GP */
                vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
@@ -615,28 +607,37 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 
        return 0;
 }
+
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+{
+       int res;
+
+       res = __kvm_set_dr(vcpu, dr, val);
+       if (res > 0)
+               kvm_queue_exception(vcpu, UD_VECTOR);
+       else if (res < 0)
+               kvm_inject_gp(vcpu, 0);
+
+       return res;
+}
 EXPORT_SYMBOL_GPL(kvm_set_dr);
 
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 {
        switch (dr) {
        case 0 ... 3:
                *val = vcpu->arch.db[dr];
                break;
        case 4:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
-               }
                /* fall through */
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
-               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
-               }
                /* fall through */
        default: /* 7 */
                *val = vcpu->arch.dr7;
@@ -645,6 +646,15 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 
        return 0;
 }
+
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+{
+       if (_kvm_get_dr(vcpu, dr, val)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
+       }
+       return 0;
+}
 EXPORT_SYMBOL_GPL(kvm_get_dr);
 
 static inline u32 bit(int bitno)
@@ -3265,7 +3275,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                }
                ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
                if (ret < 0) {
-                       r = X86EMUL_UNHANDLEABLE;
+                       r = X86EMUL_IO_NEEDED;
                        goto out;
                }
 
@@ -3321,7 +3331,7 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
                }
                ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
                if (ret < 0) {
-                       r = X86EMUL_UNHANDLEABLE;
+                       r = X86EMUL_IO_NEEDED;
                        goto out;
                }
 
@@ -3376,11 +3386,12 @@ mmio:
        trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
 
        vcpu->mmio_needed = 1;
-       vcpu->mmio_phys_addr = gpa;
-       vcpu->mmio_size = bytes;
-       vcpu->mmio_is_write = 0;
+       vcpu->run->exit_reason = KVM_EXIT_MMIO;
+       vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
+       vcpu->run->mmio.len = vcpu->mmio_size = bytes;
+       vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
 
-       return X86EMUL_UNHANDLEABLE;
+       return X86EMUL_IO_NEEDED;
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -3426,10 +3437,11 @@ mmio:
                return X86EMUL_CONTINUE;
 
        vcpu->mmio_needed = 1;
-       vcpu->mmio_phys_addr = gpa;
-       vcpu->mmio_size = bytes;
-       vcpu->mmio_is_write = 1;
-       memcpy(vcpu->mmio_data, val, bytes);
+       vcpu->run->exit_reason = KVM_EXIT_MMIO;
+       vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
+       vcpu->run->mmio.len = vcpu->mmio_size = bytes;
+       vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
+       memcpy(vcpu->run->mmio.data, val, bytes);
 
        return X86EMUL_CONTINUE;
 }
@@ -3453,7 +3465,6 @@ int emulator_write_emulated(unsigned long addr,
        }
        return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
 }
-EXPORT_SYMBOL_GPL(emulator_write_emulated);
 
 #define CMPXCHG_TYPE(t, ptr, old, new) \
        (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
@@ -3619,12 +3630,13 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 
 int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
 {
-       return kvm_get_dr(vcpu, dr, dest);
+       return _kvm_get_dr(vcpu, dr, dest);
 }
 
 int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
 {
-       return kvm_set_dr(vcpu, dr, value);
+
+       return __kvm_set_dr(vcpu, dr, value);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3839,7 +3851,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 {
        int r, shadow_mask;
        struct decode_cache *c;
-       struct kvm_run *run = vcpu->run;
 
        kvm_clear_exception_queue(vcpu);
        vcpu->arch.mmio_fault_cr2 = cr2;
@@ -3851,8 +3862,6 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
         */
        cache_all_regs(vcpu);
 
-       vcpu->mmio_is_write = 0;
-
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
                int cs_db, cs_l;
                kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
@@ -3926,32 +3935,26 @@ restart:
                return EMULATE_DO_MMIO;
        }
 
-       if (r || vcpu->mmio_is_write) {
-               run->exit_reason = KVM_EXIT_MMIO;
-               run->mmio.phys_addr = vcpu->mmio_phys_addr;
-               memcpy(run->mmio.data, vcpu->mmio_data, 8);
-               run->mmio.len = vcpu->mmio_size;
-               run->mmio.is_write = vcpu->mmio_is_write;
+       if (vcpu->mmio_needed) {
+               if (vcpu->mmio_is_write)
+                       vcpu->mmio_needed = 0;
+               return EMULATE_DO_MMIO;
        }
 
-       if (r) {
+       if (r) { /* emulation failed */
+               /*
+                * if emulation was due to access to shadowed page table
+                * and it failed try to unshadow the page and re-enter the
+                * guest to let CPU execute the instruction.
+                */
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
-                       goto done;
-               if (!vcpu->mmio_needed) {
-                       ++vcpu->stat.insn_emulation_fail;
-                       trace_kvm_emulate_insn_failed(vcpu);
-                       kvm_report_emulation_failure(vcpu, "mmio");
-                       return EMULATE_FAIL;
-               }
-               return EMULATE_DO_MMIO;
-       }
+                       return EMULATE_DONE;
 
-       if (vcpu->mmio_is_write) {
-               vcpu->mmio_needed = 0;
-               return EMULATE_DO_MMIO;
+               trace_kvm_emulate_insn_failed(vcpu);
+               kvm_report_emulation_failure(vcpu, "mmio");
+               return EMULATE_FAIL;
        }
 
-done:
        if (vcpu->arch.exception.pending)
                vcpu->arch.emulate_ctxt.restart = false;
 
This page took 0.052343 seconds and 5 git commands to generate.