/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "x86_emulate.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

unsigned long iopm_base;
unsigned long msrpm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

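/*
 * The MSR permission map covers three architectural MSR ranges
 * (0x0, 0xc0000000, 0xc0010000).  Each range is shadowed by a
 * 2 kilobyte region of the bitmap; with two intercept bits (read
 * and write) per MSR, each region covers 2048 * 8 / 2 = 8192 MSRs.
 */
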
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
				(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}

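/*
 * Pending interrupts live in a two-level bitmap: vcpu->irq_pending
 * holds one bit per vector, and vcpu->irq_summary holds one bit per
 * irq_pending word, so a pending vector can be located with two
 * __ffs() scans.
 */
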
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}

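/*
 * CLGI/STGI toggle the global interrupt flag (GIF).  While GIF is
 * clear the host takes no interrupts, which keeps the world switch
 * in svm_vcpu_run() atomic.
 */
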
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

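/*
 * On SVM the TLB is tagged with an address space identifier (ASID).
 * Bumping the vcpu's asid_generation makes pre_svm_run() allocate a
 * fresh ASID before the next VMRUN, which flushes the guest's TLB
 * entries without touching the host's.
 */
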
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

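/*
 * The EFER value handed to the hardware must always have SVME set,
 * so the VMCB copy is or'ed with MSR_EFER_SVME_MASK; the value the
 * guest believes it wrote is kept in shadow_efer.
 */
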
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}

static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT |
				       GP_VECTOR;
	svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
}

static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

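/*
 * next_rip is filled in by the exit handlers before they call
 * skip_emulated_instruction(); SVM does not report the instruction
 * length on most intercepts, so each handler supplies it by hand.
 */
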
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (svm->next_rip - svm->vmcb->save.rip > 15) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       svm->vmcb->save.rip,
		       svm->next_rip);
	}

	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

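/*
 * Per-cpu enable: set EFER.SVME and point MSR_VM_HSAVE_PA at a page
 * where the processor saves host state across VMRUN/#VMEXIT.  ASID
 * bookkeeping starts with next_asid past max_asid so that the first
 * VMRUN on this cpu is forced through new_asid().
 */
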
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

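/*
 * Each MSR owns two consecutive bits in the permission map: bit 0
 * intercepts reads, bit 1 intercepts writes.  Clearing both bits lets
 * the guest access the MSR without a #VMEXIT.
 */
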
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *iopm_va, *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

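/*
 * Set up the VMCB for a freshly reset guest: intercept the control
 * registers, the debug registers, page faults, and every instruction
 * that could affect the host, then load the segment and register
 * state architecturally defined for an x86 processor after RESET.
 */
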
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write = 	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_dr_read = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write = 	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept = 	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *    	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	int err;

	svm = kzalloc(sizeof *svm, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm->vmcb);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kfree(svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	kvm_vcpu_uninit(vcpu);
	kfree(svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

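/*
 * Hand out the next free ASID on this cpu.  When the pool is
 * exhausted, start a new generation: flush the whole TLB on the next
 * VMRUN and begin again at ASID 1 (ASID 0 belongs to the host).
 */
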
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, to_svm(vcpu)->vmcb->control.asid); /* is needed? */
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return to_svm(vcpu)->db_regs[dr];
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
		/* fall through: dr4/dr5 alias dr6/dr7 when CR4.DE is clear */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

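/*
 * Guest page fault: exit_info_2 holds the faulting address and
 * exit_info_1 the page-fault error code.  Let the shadow MMU service
 * the fault first; only if it reports an emulation case do we fall
 * through to the instruction emulator.
 */
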
static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	mutex_lock(&vcpu->kvm->lock);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		mutex_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	mutex_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(vcpu->cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	vcpu->fpu_active = 1;

	return 1;
}

*vcpu
, struct kvm_run
*kvm_run
)
991 struct vcpu_svm
*svm
= to_svm(vcpu
);
993 * VMCB is undefined after a SHUTDOWN intercept
994 * so reinitialize it.
996 clear_page(svm
->vmcb
);
997 init_vmcb(svm
->vmcb
);
999 kvm_run
->exit_reason
= KVM_EXIT_SHUTDOWN
;
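/*
 * SVM reports the port and direction of a string I/O instruction but
 * not its segment overrides, so the instruction bytes are fetched
 * from guest memory and scanned for prefix bytes by hand.
 */
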
static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = svm->vmcb->save.rip;
	ins_length = svm->next_rip - rip;
	rip += svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       svm->vmcb->save.cs.base,
		       svm->vmcb->save.rip,
		       svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}

static unsigned long io_address(struct kvm_vcpu *vcpu, int ins, gva_t *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area *save_area = &svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_MASK)) {
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}

static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address = 0;

	++vcpu->stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	count = 1;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	if (string) {
		unsigned addr_mask;

		addr_mask = io_address(vcpu, in, &address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (rep)
			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}

static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}

static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++vcpu->stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

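/*
 * Dispatch table indexed by the VMCB exit code.  Exit codes with no
 * handler fall through to handle_exit()'s KVM_EXIT_UNKNOWN path.
 */
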
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_INTR]				= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};

static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}

static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &to_svm(vcpu)->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

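/*
 * The main entry loop: with GIF cleared, save the host's volatile
 * state (segment selectors, cr2, debug registers, FPU), load the
 * guest's, point the accumulator at the VMCB and issue
 * VMLOAD/VMRUN/VMSAVE, then restore the host and dispatch the exit
 * reason.  The general-purpose register save/restore around VMRUN is
 * open-coded because VMRUN only switches the state held in the VMCB.
 */
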
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return r;

	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	clgi();

	vcpu->guest_mode = 1;
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			svm_flush_tlb(vcpu);

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->cr2;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	if (vcpu->fpu_active) {
		fx_save(vcpu->host_fx_image);
		fx_restore(vcpu->guest_fx_image);
	}

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[svm]), %%rbx \n\t"
		"mov %c[rcx](%[svm]), %%rcx \n\t"
		"mov %c[rdx](%[svm]), %%rdx \n\t"
		"mov %c[rsi](%[svm]), %%rsi \n\t"
		"mov %c[rdi](%[svm]), %%rdi \n\t"
		"mov %c[rbp](%[svm]), %%rbp \n\t"
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#else
		"mov %c[rbx](%[svm]), %%ebx \n\t"
		"mov %c[rcx](%[svm]), %%ecx \n\t"
		"mov %c[rdx](%[svm]), %%edx \n\t"
		"mov %c[rsi](%[svm]), %%esi \n\t"
		"mov %c[rdi](%[svm]), %%edi \n\t"
		"mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[vmcb](%[svm]), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[vmcb](%[svm]), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[svm]) \n\t"
		"mov %%rcx, %c[rcx](%[svm]) \n\t"
		"mov %%rdx, %c[rdx](%[svm]) \n\t"
		"mov %%rsi, %c[rsi](%[svm]) \n\t"
		"mov %%rdi, %c[rdi](%[svm]) \n\t"
		"mov %%rbp, %c[rbp](%[svm]) \n\t"
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[svm]) \n\t"
		"mov %%ecx, %c[rcx](%[svm]) \n\t"
		"mov %%edx, %c[rdx](%[svm]) \n\t"
		"mov %%esi, %c[rsi](%[svm]) \n\t"
		"mov %%edi, %c[rdi](%[svm]) \n\t"
		"mov %%ebp, %c[rbp](%[svm]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	vcpu->guest_mode = 0;

	if (vcpu->fpu_active) {
		fx_save(vcpu->guest_fx_image);
		fx_restore(vcpu->host_fx_image);
	}

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->cr2 = svm->vmcb->save.cr2;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			(void *)(unsigned long)svm->vmcb->save.rip);

	stgi();

	kvm_reput_irq(vcpu);

	svm->next_rip = 0;

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		if (signal_pending(current)) {
			++vcpu->stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++vcpu->stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		/* a page fault while delivering a page fault: inject #DF */
		svm->vmcb->control.event_inj_err = 0;
		svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					       SVM_EVTINJ_VALID_ERR |
					       SVM_EVTINJ_TYPE_EXEPT |
					       DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	svm->vmcb->save.cr2 = addr;
	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT |
				       PF_VECTOR;
	svm->vmcb->control.event_inj_err = err_code;
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

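/*
 * 0f 01 d9 is the VMMCALL opcode; the trailing c3 (ret) lets the
 * patched hypercall site be called as an ordinary function.
 */
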
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
};

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)