#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
-#define KVM_EFER_LMA (1 << 10)
-#define KVM_EFER_LME (1 << 8)
-
#define SVM_FEATURE_NPT (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_DEATURE_SVML (1 << 2)
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
-	if (!(efer & KVM_EFER_LMA))
-		efer &= ~KVM_EFER_LME;
+	if (!(efer & EFER_LMA))
+		efer &= ~EFER_LME;
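+	/* VMRUN requires that SVME stays set in the VMCB copy of EFER. */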
	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
+static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-					SVM_EVTINJ_VALID_ERR |
-					SVM_EVTINJ_TYPE_EXEPT |
-					GP_VECTOR;
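+	/*
+	 * Encode the exception in the VMCB EVTINJ field: vector number nr,
+	 * the valid bit, the exception type, and a valid-error-code bit when
+	 * the exception delivers an error code.
+	 */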
+	svm->vmcb->control.event_inj = nr
+		| SVM_EVTINJ_VALID
+		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}
-static void inject_ud(struct kvm_vcpu *vcpu)
+static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
-	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-						SVM_EVTINJ_TYPE_EXEPT |
-						UD_VECTOR;
-}
+	struct vcpu_svm *svm = to_svm(vcpu);
-static int is_page_fault(uint32_t info)
-{
-	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
-	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
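+	/*
+	 * A valid exit_int_info means the queued exception was still being
+	 * delivered when the exit occurred, i.e. it was not injected.
+	 */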
+	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}
static int is_external_interrupt(u32 info)
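+	/* Intercept CR8 (the TPR) accesses in addition to CR0/CR3/CR4. */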
	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
+					INTERCEPT_CR4_MASK |
+					INTERCEPT_CR8_MASK;
	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
+					INTERCEPT_CR4_MASK |
+					INTERCEPT_CR8_MASK;
	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;
+	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
	rdtscll(vcpu->host_tsc);
-	kvm_put_guest_fpu(vcpu);
}
static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
#ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & KVM_EFER_LME) {
+	if (vcpu->shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer |= KVM_EFER_LMA;
-			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
+			vcpu->shadow_efer |= EFER_LMA;
+			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer &= ~KVM_EFER_LMA;
-			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
+			vcpu->shadow_efer &= ~EFER_LMA;
+			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0);
	if (er != EMULATE_DONE)
-		inject_ud(&svm->vcpu);
-
+		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}
static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
-	inject_ud(&svm->vcpu);
+	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}
	return 1;
}
+static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
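+	/*
+	 * A CR8 write updates the TPR; let the emulator perform it.  With an
+	 * in-kernel irqchip nothing more is needed, otherwise exit so that
+	 * userspace can propagate the new TPR to its local APIC model.
+	 */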
+	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
+	if (irqchip_in_kernel(svm->vcpu.kvm))
+		return 1;
+	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+	return 0;
+}
+
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 data;
	if (svm_get_msr(&svm->vcpu, ecx, &data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
		       exit_code);
	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
-	    || svm_exit_handlers[exit_code] == 0) {
+	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}
}
-static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
-				  unsigned long addr,
-				  uint32_t err_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
-
-	++vcpu->stat.pf_guest;
-
-	if (is_page_fault(exit_int_info)) {
-
-		svm->vmcb->control.event_inj_err = 0;
-		svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-						SVM_EVTINJ_VALID_ERR |
-						SVM_EVTINJ_TYPE_EXEPT |
-						DF_VECTOR;
-		return;
-	}
-	vcpu->cr2 = addr;
-	svm->vmcb->save.cr2 = addr;
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-					SVM_EVTINJ_VALID_ERR |
-					SVM_EVTINJ_TYPE_EXEPT |
-					PF_VECTOR;
-	svm->vmcb->control.event_inj_err = err_code;
-}
-
-
static int is_disabled(void)
{
	u64 vm_cr;
	.set_rflags = svm_set_rflags,
	.tlb_flush = svm_flush_tlb,
-	.inject_page_fault = svm_inject_page_fault,
-
-	.inject_gp = svm_inject_gp,
	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
+	.queue_exception = svm_queue_exception,
+	.exception_injected = svm_exception_injected,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,