/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm_svm.h"
#include "x86_emulate.h"
#include "irq.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

unsigned long iopm_base;
unsigned long msrpm_base;
struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}
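
/*
 * Pending interrupts are kept in a two-level bitmap: irq_pending[] has
 * one bit per vector and irq_summary one bit per irq_pending word, so
 * pop_irq() below finds the lowest pending vector with two __ffs()
 * scans instead of walking the whole vector bitmap.
 */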
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);
        return irq;
}
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
        set_bit(irq, vcpu->irq_pending);
        set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
static inline void clgi(void)
{
        asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
        asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}
static inline unsigned long kvm_read_cr2(void)
{
        unsigned long cr2;

        asm volatile ("mov %%cr2, %0" : "=r" (cr2));
        return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
        asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
        unsigned long dr6;

        asm volatile ("mov %%dr6, %0" : "=r" (dr6));
        return dr6;
}

static inline void write_dr6(unsigned long val)
{
        asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
        unsigned long dr7;

        asm volatile ("mov %%dr7, %0" : "=r" (dr7));
        return dr7;
}

static inline void write_dr7(unsigned long val)
{
        asm volatile ("mov %0, %%dr7" :: "r" (val));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!(efer & KVM_EFER_LMA))
                efer &= ~KVM_EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
        vcpu->shadow_efer = efer;
}
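
/*
 * Event injection below follows the VMCB encoding: event_inj packs the
 * vector in bits 7:0, the event type in bits 10:8 (SVM_EVTINJ_TYPE_*),
 * an error-code-valid bit and a valid bit, while the error code itself
 * goes in event_inj_err.  A #GP with an error code is thus
 * VALID | VALID_ERR | TYPE_EXEPT | GP_VECTOR.
 */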
static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
                                       SVM_EVTINJ_VALID_ERR |
                                       SVM_EVTINJ_TYPE_EXEPT |
                                       GP_VECTOR;
        svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
                                                SVM_EVTINJ_TYPE_EXEPT |
                                                UD_VECTOR;
}
static int is_page_fault(uint32_t info)
{
        info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
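
/*
 * SVM hardware of this vintage does not report the length of an
 * intercepted instruction (the NRIP-save feature arrived in later
 * revisions), so each intercept handler precomputes svm->next_rip
 * (e.g. rip + 1 for HLT, rip + 2 for CPUID/RDMSR/WRMSR) before calling
 * skip_emulated_instruction().
 */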
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
                return;
        }
        if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
                printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
                       __FUNCTION__, svm->vmcb->save.rip, svm->next_rip);
        }

        vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

        vcpu->interrupt_window_open = 1;
}
static int has_svm(void)
{
        uint32_t eax, ebx, ecx, edx;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                printk(KERN_INFO "has_svm: not amd\n");
                return 0;
        }

        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < SVM_CPUID_FUNC) {
                printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
                return 0;
        }

        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
                printk(KERN_DEBUG "has_svm: svm not available\n");
                return 0;
        }
        return 1;
}
static void svm_hardware_disable(void *garbage)
{
        struct svm_cpu_data *svm_data
                = per_cpu(svm_data, raw_smp_processor_id());

        if (svm_data) {
                uint64_t efer;

                wrmsrl(MSR_VM_HSAVE_PA, 0);
                rdmsrl(MSR_EFER, efer);
                wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
                per_cpu(svm_data, raw_smp_processor_id()) = NULL;
                __free_page(svm_data->save_area);
                kfree(svm_data);
        }
}
static void svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *svm_data;
        uint64_t efer;
#ifdef CONFIG_X86_64
        struct desc_ptr gdt_descr;
#else
        struct Xgt_desc_struct gdt_descr;
#endif
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        if (!has_svm()) {
                printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
                return;
        }
        svm_data = per_cpu(svm_data, me);

        if (!svm_data) {
                printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
                       me);
                return;
        }

        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;
        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        asm volatile ("sgdt %0" : "=m"(gdt_descr));
        gdt = (struct desc_struct *)gdt_descr.address;
        svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

        wrmsrl(MSR_VM_HSAVE_PA,
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *svm_data;
        int r;

        svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!svm_data)
                return -ENOMEM;
        svm_data->cpu = cpu;
        svm_data->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!svm_data->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = svm_data;

        return 0;

err_1:
        kfree(svm_data);
        return r;
}
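
/*
 * MSR permission map layout (per the AMD APM): each MSR gets two bits,
 * the low bit intercepting reads and the high bit intercepting writes,
 * so one 2K range covers 2048 * 8 / 2 = 8192 MSRs.  Three ranges are
 * mapped: 0x0, 0xc0000000 and 0xc0010000.  As a worked example,
 * MSR_IA32_SYSENTER_CS (0x174) falls in range 0, giving
 * msr_offset = 0x174 * 2 = 0x2e8 bits, i.e. bits 8-9 of msrpm[23].
 * read = write = 1 clears both bits, meaning "no intercept".
 */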
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}
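
/*
 * Both permission bitmaps start out all-ones (intercept everything);
 * svm_hardware_setup() then punches holes: I/O port 0x80 (the POST
 * debug port) in the IOPM, and the handful of syscall/sysenter MSRs
 * that are context-switched around VMRUN anyway in the MSRPM.
 */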
static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        struct page *msrpm_pages;
        void *iopm_va, *msrpm_va;
        int r;

        kvm_emulator_want_group7_invlpg();

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

        r = -ENOMEM;
        if (!msrpm_pages)
                goto err_1;

        msrpm_va = page_address(msrpm_pages);
        memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
        msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

        for_each_online_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err_2;
        }
        return 0;

err_2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
        msrpm_base = 0;
err_1:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}
static __exit void svm_hardware_unsetup(void)
{
        __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = msrpm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}
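
/*
 * init_vmcb() puts the guest in the x86 reset state: real mode,
 * cs.selector 0xf000 with rip 0xfff0, dr6 0xffff0ff0.  The intercept
 * masks select every exit this version of kvm knows how to handle, and
 * V_INTR_MASKING keeps the guest's IF from masking physical interrupts
 * on the host.
 */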
static void init_vmcb(struct vmcb *vmcb)
{
        struct vmcb_control_area *control = &vmcb->control;
        struct vmcb_save_area *save = &vmcb->save;

        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;

        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;

        control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK;

        control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_exceptions = 1 << PF_VECTOR;

        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                /*
                 * selective cr0 intercept bug?
                 *      0:   0f 22 d8        mov    %eax,%cr3
                 *      3:   0f 20 c0        mov    %cr0,%eax
                 *      6:   0d 00 00 00 80  or     $0x80000000,%eax
                 *      b:   0f 22 c0        mov    %eax,%cr0
                 * set cr3 -> interception
                 * get cr0 -> interception
                 * set cr0 -> no interception
                 */
                /*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_HLT) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
                                (1ULL << INTERCEPT_TASK_SWITCH) |
                                (1ULL << INTERCEPT_SHUTDOWN) |
                                (1ULL << INTERCEPT_VMRUN) |
                                (1ULL << INTERCEPT_VMMCALL) |
                                (1ULL << INTERCEPT_VMLOAD) |
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
                                (1ULL << INTERCEPT_SKINIT) |
                                (1ULL << INTERCEPT_MONITOR) |
                                (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = msrpm_base;
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                          SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = MSR_EFER_SVME_MASK;

        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;

        /*
         * cr0 val on cpu init should be 0x60000010, we enable cpu
         * cache by default. the orderly way is to enable cache in bios.
         */
        save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        if (irqchip_in_kernel(kvm)) {
                err = kvm_create_lapic(&svm->vcpu);
                if (err < 0)
                        goto free_svm;
        }

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                err = -ENOMEM;
                goto uninit;
        }

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        memset(svm->db_regs, 0, sizeof(svm->db_regs));
        init_vmcb(svm->vmcb);

        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
        svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (svm->vcpu.vcpu_id == 0)
                svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}
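
/*
 * The guest reads the TSC as host_tsc + tsc_offset.  When a vcpu
 * migrates to a CPU whose TSC lags the old one, adding
 * (old_host_tsc - new_host_tsc) to tsc_offset below keeps the
 * guest-visible TSC from ever going backwards.
 */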
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 tsc_this, delta;

                /*
                 * Make sure that the guest sees a monotonically
                 * increasing TSC.
                 */
                rdtscll(tsc_this);
                delta = vcpu->host_tsc - tsc_this;
                svm->vmcb->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_apic_timer(vcpu);
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        rdtscll(vcpu->host_tsc);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->rip;
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
        var->unusable = !var->present;
}
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

        *db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        *l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.idtr.limit;
        dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->limit;
        svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.gdtr.limit;
        dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->limit;
        svm->vmcb->save.gdtr.base = dt->base;
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
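
/*
 * This kvm runs guests with shadow paging: from the hardware's point
 * of view paging is always on, so svm_set_cr0() forces CR0.PG and
 * CR0.WP below no matter what the guest wrote, and the guest's own
 * view of CR0 is tracked separately in vcpu->cr0.
 */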
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & KVM_EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->shadow_efer |= KVM_EFER_LMA;
                        svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->shadow_efer &= ~KVM_EFER_LMA;
                        svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
                }
        }
#endif
        if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }

        vcpu->cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

        svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        return -EOPNOTSUPP;
}
static int svm_get_irq(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_int_info = svm->vmcb->control.exit_int_info;

        if (is_external_interrupt(exit_int_info))
                return exit_int_info & SVM_EVTINJ_VEC_MASK;
        return -1;
}
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
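
/*
 * ASIDs let guest and host TLB entries coexist: the host runs with
 * ASID 0 and each VMCB is tagged with a nonzero ASID.  They are handed
 * out per-CPU; when next_asid overflows max_asid, the generation is
 * bumped, the whole TLB is flushed via TLB_CONTROL_FLUSH_ALL_ASID and
 * numbering restarts at 1.
 */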
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
        if (svm_data->next_asid > svm_data->max_asid) {
                ++svm_data->asid_generation;
                svm_data->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->vcpu.cpu = svm_data->cpu;
        svm->asid_generation = svm_data->asid_generation;
        svm->vmcb->control.asid = svm_data->next_asid++;
}
static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
        invlpga(address, to_svm(vcpu)->vmcb->control.asid); /* is needed? */
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
        return to_svm(vcpu)->db_regs[dr];
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        *exception = 0;

        if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
                svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
                svm->vmcb->save.dr6 |= DR6_BD_MASK;
                *exception = DB_VECTOR;
                return;
        }

        switch (dr) {
        case 0 ... 3:
                svm->db_regs[dr] = value;
                return;
        case 4 ... 5:
                if (vcpu->cr4 & X86_CR4_DE) {
                        *exception = UD_VECTOR;
                        return;
                }
        case 7: {
                if (value & ~((1ULL << 32) - 1)) {
                        *exception = GP_VECTOR;
                        return;
                }
                svm->vmcb->save.dr7 = value;
                return;
        }
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __FUNCTION__, dr);
                *exception = UD_VECTOR;
                return;
        }
}
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 exit_int_info = svm->vmcb->control.exit_int_info;
        struct kvm *kvm = svm->vcpu.kvm;
        u64 fault_address;
        u32 error_code;
        enum emulation_result er;
        int r;

        if (!irqchip_in_kernel(kvm) &&
            is_external_interrupt(exit_int_info))
                push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

        mutex_lock(&kvm->lock);

        fault_address = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;
        r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
        if (r < 0) {
                mutex_unlock(&kvm->lock);
                return r;
        }
        if (!r) {
                mutex_unlock(&kvm->lock);
                return 1;
        }
        er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
                                 error_code);
        mutex_unlock(&kvm->lock);

        switch (er) {
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
                ++svm->vcpu.stat.mmio_exits;
                return 0;
        case EMULATE_FAIL:
                vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
                break;
        default:
                BUG();
        }

        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        return 0;
}
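
/*
 * Lazy FPU switching: svm_set_cr3() sets CR0.TS and intercepts #NM, so
 * guest FPU state is only loaded once the guest actually executes an
 * FPU instruction.  The #NM intercept below then drops the intercept
 * and clears TS again (unless the guest itself set it).
 */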
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        if (!(svm->vcpu.cr0 & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;

        return 1;
}
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm->vmcb);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, down, in, string, rep;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
        rep = (io_info & SVM_IOIO_REP_MASK) != 0;
        down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

        return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}
static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 3;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_hypercall(&svm->vcpu, kvm_run);
}
static int invalid_op_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        inject_ud(&svm->vcpu);
        return 1;
}
static int task_switch_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
{
        pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        return 0;
}
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 2;
        kvm_emulate_cpuid(&svm->vcpu);
        return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
                pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
        return 1;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                *data = svm->vmcb->control.tsc_offset + tsc;
                break;
        }
        case MSR_K6_STAR:
                *data = svm->vmcb->save.star;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                *data = svm->vmcb->save.lstar;
                break;
        case MSR_CSTAR:
                *data = svm->vmcb->save.cstar;
                break;
        case MSR_KERNEL_GS_BASE:
                *data = svm->vmcb->save.kernel_gs_base;
                break;
        case MSR_SYSCALL_MASK:
                *data = svm->vmcb->save.sfmask;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                *data = svm->vmcb->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
                *data = svm->vmcb->save.sysenter_eip;
                break;
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->vmcb->save.sysenter_esp;
                break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
        return 0;
}
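
/*
 * RDMSR returns its result split across edx:eax.  Only rax lives in
 * the VMCB save area, so the low half is written there and the high
 * half goes into the cached RDX; wrmsr_interception() below
 * reassembles a 64-bit value from the same two halves.
 */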
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        u64 data;

        if (svm_get_msr(&svm->vcpu, ecx, &data))
                svm_inject_gp(&svm->vcpu, 0);
        else {
                svm->vmcb->save.rax = data & 0xffffffff;
                svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
                svm->next_rip = svm->vmcb->save.rip + 2;
                skip_emulated_instruction(&svm->vcpu);
        }
        return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
        case MSR_K6_STAR:
                svm->vmcb->save.star = data;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                svm->vmcb->save.lstar = data;
                break;
        case MSR_CSTAR:
                svm->vmcb->save.cstar = data;
                break;
        case MSR_KERNEL_GS_BASE:
                svm->vmcb->save.kernel_gs_base = data;
                break;
        case MSR_SYSCALL_MASK:
                svm->vmcb->save.sfmask = data;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                svm->vmcb->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
                svm->vmcb->save.sysenter_eip = data;
                break;
        case MSR_IA32_SYSENTER_ESP:
                svm->vmcb->save.sysenter_esp = data;
                break;
        default:
                return kvm_set_msr_common(vcpu, ecx, data);
        }
        return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        u64 data = (svm->vmcb->save.rax & -1u)
                | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
        svm->next_rip = svm->vmcb->save.rip + 2;
        if (svm_set_msr(&svm->vcpu, ecx, data))
                svm_inject_gp(&svm->vcpu, 0);
        else
                skip_emulated_instruction(&svm->vcpu);
        return 1;
}
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        if (svm->vmcb->control.exit_info_1)
                return wrmsr_interception(svm, kvm_run);
        else
                return rdmsr_interception(svm, kvm_run);
}
static int interrupt_window_interception(struct vcpu_svm *svm,
                                         struct kvm_run *kvm_run)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
         */
        if (kvm_run->request_interrupt_window &&
            !svm->vcpu.irq_summary) {
                ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }

        return 1;
}
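
/*
 * Exit dispatch: SVM exit codes are small integers, so instead of a
 * long switch statement the handlers sit in a sparse function-pointer
 * array indexed by exit_code.  handle_exit() bounds-checks the code
 * and reports anything without a handler as KVM_EXIT_UNKNOWN.
 */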
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
                                  struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0]                  = emulate_on_interception,
        [SVM_EXIT_READ_CR3]                  = emulate_on_interception,
        [SVM_EXIT_READ_CR4]                  = emulate_on_interception,
        [SVM_EXIT_WRITE_CR0]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_CR3]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_CR4]                 = emulate_on_interception,
        [SVM_EXIT_READ_DR0]                  = emulate_on_interception,
        [SVM_EXIT_READ_DR1]                  = emulate_on_interception,
        [SVM_EXIT_READ_DR2]                  = emulate_on_interception,
        [SVM_EXIT_READ_DR3]                  = emulate_on_interception,
        [SVM_EXIT_WRITE_DR0]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_DR1]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_DR2]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_DR3]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_DR5]                 = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7]                 = emulate_on_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]     = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]     = nm_interception,
        [SVM_EXIT_INTR]                      = nop_on_interception,
        [SVM_EXIT_NMI]                       = nop_on_interception,
        [SVM_EXIT_SMI]                       = nop_on_interception,
        [SVM_EXIT_INIT]                      = nop_on_interception,
        [SVM_EXIT_VINTR]                     = interrupt_window_interception,
        /* [SVM_EXIT_CR0_SEL_WRITE]          = emulate_on_interception, */
        [SVM_EXIT_CPUID]                     = cpuid_interception,
        [SVM_EXIT_HLT]                       = halt_interception,
        [SVM_EXIT_INVLPG]                    = emulate_on_interception,
        [SVM_EXIT_INVLPGA]                   = invalid_op_interception,
        [SVM_EXIT_IOIO]                      = io_interception,
        [SVM_EXIT_MSR]                       = msr_interception,
        [SVM_EXIT_TASK_SWITCH]               = task_switch_interception,
        [SVM_EXIT_SHUTDOWN]                  = shutdown_interception,
        [SVM_EXIT_VMRUN]                     = invalid_op_interception,
        [SVM_EXIT_VMMCALL]                   = vmmcall_interception,
        [SVM_EXIT_VMLOAD]                    = invalid_op_interception,
        [SVM_EXIT_VMSAVE]                    = invalid_op_interception,
        [SVM_EXIT_STGI]                      = invalid_op_interception,
        [SVM_EXIT_CLGI]                      = invalid_op_interception,
        [SVM_EXIT_SKINIT]                    = invalid_op_interception,
        [SVM_EXIT_MONITOR]                   = invalid_op_interception,
        [SVM_EXIT_MWAIT]                     = invalid_op_interception,
};
static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
                printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
                       "exit_code 0x%x\n",
                       __FUNCTION__, svm->vmcb->control.exit_int_info,
                       exit_code);

        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || svm_exit_handlers[exit_code] == 0) {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_code;
                return 0;
        }

        return svm_exit_handlers[exit_code](svm, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
        svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        if (svm->vcpu.cpu != cpu ||
            svm->asid_generation != svm_data->asid_generation)
                new_asid(svm, svm_data);
}
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
        struct vmcb_control_area *control;

        control = &svm->vmcb->control;
        control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm_inject_irq(svm, irq);
}
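
/*
 * With the in-kernel irqchip, an interrupt can only be delivered while
 * the guest has IF set and is not in an interrupt shadow.  When
 * delivery is blocked, svm_intr_assist() programs a dummy virtual
 * interrupt (V_IRQ) and intercepts VINTR; that exit fires exactly when
 * the window opens, at which point the real vector is injected.
 */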
static void svm_intr_assist(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb;
        int intr_vector = -1;
        struct kvm_vcpu *vcpu = &svm->vcpu;

        kvm_inject_pending_timer_irqs(vcpu);
        if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
            ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
                intr_vector = vmcb->control.exit_int_info &
                              SVM_EVTINJ_VEC_MASK;
                vmcb->control.exit_int_info = 0;
                svm_inject_irq(svm, intr_vector);
                return;
        }

        if (vmcb->control.int_ctl & V_IRQ_MASK)
                return;

        if (!kvm_cpu_has_interrupt(vcpu))
                return;

        if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
            (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
            (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
                /* unable to deliver irq, set pending irq */
                vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
                svm_inject_irq(svm, 0x0);
                return;
        }
        /* Okay, we can deliver the interrupt: grab it and update PIC state. */
        intr_vector = kvm_cpu_get_interrupt(vcpu);
        svm_inject_irq(svm, intr_vector);
        kvm_timer_intr_post(vcpu, intr_vector);
}
static void kvm_reput_irq(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;

        if ((control->int_ctl & V_IRQ_MASK)
            && !irqchip_in_kernel(svm->vcpu.kvm)) {
                control->int_ctl &= ~V_IRQ_MASK;
                push_irq(&svm->vcpu, control->int_vector);
        }

        svm->vcpu.interrupt_window_open =
                !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
static void svm_do_inject_vector(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);
        svm_inject_irq(svm, irq);
}
static void do_interrupt_requests(struct vcpu_svm *svm,
                                  struct kvm_run *kvm_run)
{
        struct vmcb_control_area *control = &svm->vmcb->control;

        svm->vcpu.interrupt_window_open =
                (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
                 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

        if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
                /*
                 * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
                svm_do_inject_vector(svm);

        /*
         * Interrupts blocked.  Wait for unblock.
         */
        if (!svm->vcpu.interrupt_window_open &&
            (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
                control->intercept |= 1ULL << INTERCEPT_VINTR;
        else
                control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
static void post_kvm_run_save(struct vcpu_svm *svm,
                              struct kvm_run *kvm_run)
{
        if (irqchip_in_kernel(svm->vcpu.kvm))
                kvm_run->ready_for_interrupt_injection = 1;
        else
                kvm_run->ready_for_interrupt_injection =
                        (svm->vcpu.interrupt_window_open &&
                         svm->vcpu.irq_summary == 0);
        kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
        kvm_run->cr8 = get_cr8(&svm->vcpu);
        kvm_run->apic_base = kvm_get_apic_base(&svm->vcpu);
}
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct vcpu_svm *svm,
                                        struct kvm_run *kvm_run)
{
        return (!svm->vcpu.irq_summary &&
                kvm_run->request_interrupt_window &&
                svm->vcpu.interrupt_window_open &&
                (svm->vmcb->save.rflags & X86_EFLAGS_IF));
}
static void save_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
        asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
        asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
        asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
        asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
        asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
        asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;
        int r;

again:
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                return r;

        if (signal_pending(current)) {
                ++vcpu->stat.signal_exits;
                post_kvm_run_save(svm, kvm_run);
                kvm_run->exit_reason = KVM_EXIT_INTR;
                return -EINTR;
        }

        if (irqchip_in_kernel(vcpu->kvm))
                svm_intr_assist(svm);
        else if (!vcpu->mmio_read_completed)
                do_interrupt_requests(svm, kvm_run);

        vcpu->guest_mode = 1;
        smp_wmb();
        if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
                svm_flush_tlb(vcpu);

        clgi();

        pre_svm_run(svm);

        save_host_msrs(vcpu);
        fs_selector = read_fs();
        gs_selector = read_gs();
        ldt_selector = read_ldt();
        svm->host_cr2 = kvm_read_cr2();
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
        svm->vmcb->save.cr2 = vcpu->cr2;

        if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
                save_db_regs(svm->host_db_regs);
                load_db_regs(svm->db_regs);
        }

        if (vcpu->fpu_active) {
                fx_save(&vcpu->host_fx_image);
                fx_restore(&vcpu->guest_fx_image);
        }
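
        /*
         * World switch: VMRUN expects the VMCB physical address in
         * rax; it saves host state to the MSR_VM_HSAVE_PA area and
         * loads guest state.  The VMLOAD/VMSAVE pair around it
         * transfers the extra state (FS/GS bases, TR, LDTR, syscall
         * MSRs) that VMRUN itself leaves alone, and the mov chains
         * switch the general-purpose registers, which the hardware
         * does not touch.
         */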
        asm volatile (
#ifdef CONFIG_X86_64
                "push %%rbx; push %%rcx; push %%rdx;"
                "push %%rsi; push %%rdi; push %%rbp;"
                "push %%r8;  push %%r9;  push %%r10; push %%r11;"
                "push %%r12; push %%r13; push %%r14; push %%r15;"
#else
                "push %%ebx; push %%ecx; push %%edx;"
                "push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
                "mov %c[rbx](%[svm]), %%rbx \n\t"
                "mov %c[rcx](%[svm]), %%rcx \n\t"
                "mov %c[rdx](%[svm]), %%rdx \n\t"
                "mov %c[rsi](%[svm]), %%rsi \n\t"
                "mov %c[rdi](%[svm]), %%rdi \n\t"
                "mov %c[rbp](%[svm]), %%rbp \n\t"
                "mov %c[r8](%[svm]),  %%r8  \n\t"
                "mov %c[r9](%[svm]),  %%r9  \n\t"
                "mov %c[r10](%[svm]), %%r10 \n\t"
                "mov %c[r11](%[svm]), %%r11 \n\t"
                "mov %c[r12](%[svm]), %%r12 \n\t"
                "mov %c[r13](%[svm]), %%r13 \n\t"
                "mov %c[r14](%[svm]), %%r14 \n\t"
                "mov %c[r15](%[svm]), %%r15 \n\t"
#else
                "mov %c[rbx](%[svm]), %%ebx \n\t"
                "mov %c[rcx](%[svm]), %%ecx \n\t"
                "mov %c[rdx](%[svm]), %%edx \n\t"
                "mov %c[rsi](%[svm]), %%esi \n\t"
                "mov %c[rdi](%[svm]), %%edi \n\t"
                "mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
                /* Enter guest mode */
                "push %%rax \n\t"
                "mov %c[vmcb](%[svm]), %%rax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
                "pop %%rax \n\t"
#else
                /* Enter guest mode */
                "push %%eax \n\t"
                "mov %c[vmcb](%[svm]), %%eax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
                "pop %%eax \n\t"
#endif

                /* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
                "mov %%rbx, %c[rbx](%[svm]) \n\t"
                "mov %%rcx, %c[rcx](%[svm]) \n\t"
                "mov %%rdx, %c[rdx](%[svm]) \n\t"
                "mov %%rsi, %c[rsi](%[svm]) \n\t"
                "mov %%rdi, %c[rdi](%[svm]) \n\t"
                "mov %%rbp, %c[rbp](%[svm]) \n\t"
                "mov %%r8,  %c[r8](%[svm])  \n\t"
                "mov %%r9,  %c[r9](%[svm])  \n\t"
                "mov %%r10, %c[r10](%[svm]) \n\t"
                "mov %%r11, %c[r11](%[svm]) \n\t"
                "mov %%r12, %c[r12](%[svm]) \n\t"
                "mov %%r13, %c[r13](%[svm]) \n\t"
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"

                "pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
                "pop %%r11; pop %%r10; pop %%r9;  pop %%r8;"
                "pop %%rbp; pop %%rdi; pop %%rsi;"
                "pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
                "mov %%ebx, %c[rbx](%[svm]) \n\t"
                "mov %%ecx, %c[rcx](%[svm]) \n\t"
                "mov %%edx, %c[rdx](%[svm]) \n\t"
                "mov %%esi, %c[rsi](%[svm]) \n\t"
                "mov %%edi, %c[rdi](%[svm]) \n\t"
                "mov %%ebp, %c[rbp](%[svm]) \n\t"

                "pop %%ebp; pop %%edi; pop %%esi;"
                "pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
                :
                : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
                  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
                  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
                  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
                  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
                  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
                  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
                  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
                  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
                  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
                  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
                  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
                  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
                  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
                  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
                : "cc", "memory");

        vcpu->guest_mode = 0;

        if (vcpu->fpu_active) {
                fx_save(&vcpu->guest_fx_image);
                fx_restore(&vcpu->host_fx_image);
        }

        if ((svm->vmcb->save.dr7 & 0xff))
                load_db_regs(svm->host_db_regs);

        vcpu->cr2 = svm->vmcb->save.cr2;

        write_dr6(svm->host_dr6);
        write_dr7(svm->host_dr7);
        kvm_write_cr2(svm->host_cr2);

        load_fs(fs_selector);
        load_gs(gs_selector);
        load_ldt(ldt_selector);
        load_host_msrs(vcpu);

        reload_tss(vcpu);

        /*
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING))
                profile_hit(KVM_PROFILING,
                            (void *)(unsigned long)svm->vmcb->save.rip);

        stgi();

        kvm_reput_irq(svm);

        svm->next_rip = 0;

        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
                post_kvm_run_save(svm, kvm_run);
                return 0;
        }

        r = handle_exit(svm, kvm_run);
        if (r > 0) {
                if (dm_request_for_irq_injection(svm, kvm_run)) {
                        ++vcpu->stat.request_irq_exits;
                        post_kvm_run_save(svm, kvm_run);
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        return -EINTR;
                }
                kvm_resched(vcpu);
                goto again;
        }
        post_kvm_run_save(svm, kvm_run);
        return r;
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);

        if (vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                svm->vmcb->save.cr0 |= X86_CR0_TS;
                vcpu->fpu_active = 0;
        }
}
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
                                  unsigned long addr,
                                  u32 err_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

        ++vcpu->stat.pf_guest;

        if (is_page_fault(exit_int_info)) {
                /* a page fault while delivering a page fault: double fault */
                svm->vmcb->control.event_inj_err = 0;
                svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
                                               SVM_EVTINJ_VALID_ERR |
                                               SVM_EVTINJ_TYPE_EXEPT |
                                               DF_VECTOR;
                return;
        }
        vcpu->cr2 = addr;
        svm->vmcb->save.cr2 = addr;
        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
                                       SVM_EVTINJ_VALID_ERR |
                                       SVM_EVTINJ_TYPE_EXEPT |
                                       PF_VECTOR;
        svm->vmcb->control.event_inj_err = err_code;
}
static int is_disabled(void)
{
        u64 vm_cr;

        rdmsrl(MSR_VM_CR, vm_cr);
        if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
                return 1;

        return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
        hypercall[3] = 0xc3;
}
static void svm_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}
static struct kvm_arch_ops svm_arch_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,

        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
        .vcpu_decache = svm_vcpu_decache,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cs_db_l_bits = svm_get_cs_db_l_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .get_dr = svm_get_dr,
        .set_dr = svm_set_dr,
        .cache_regs = svm_cache_regs,
        .decache_regs = svm_decache_regs,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,

        .invlpg = svm_invlpg,
        .tlb_flush = svm_flush_tlb,
        .inject_page_fault = svm_inject_page_fault,

        .inject_gp = svm_inject_gp,

        .run = svm_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
        .patch_hypercall = svm_patch_hypercall,
        .get_irq = svm_get_irq,
        .set_irq = svm_set_irq,
};
static int __init svm_init(void)
{
        return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
                             THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)