/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "x86_emulate.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
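/*
 * A note on the allocation orders above: per the AMD APM, the SVM I/O
 * permission map is 12KB, so IOPM_ALLOC_ORDER 2 (four pages, 16KB)
 * covers it, and the MSR permission map is 8KB, matching
 * MSRPM_ALLOC_ORDER 1 (two pages).
 */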
static void kvm_reput_irq(struct vcpu_svm *svm);
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}
unsigned long iopm_base;
unsigned long msrpm_base;
struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;
struct svm_init_data {
        int cpu;
        int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
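/*
 * Each MSR takes two bits in the permission map (one for read intercept,
 * one for write intercept), so a 2048-byte range describes
 * 2048 * 8 / 2 = 8192 consecutive MSRs.
 */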
#define MAX_INST_SIZE 15
static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}
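/*
 * Pending interrupts live in a two-level structure: irq_pending[] is a
 * bitmap of pending vectors and irq_summary has one bit per irq_pending
 * word, so a pending vector is found with two __ffs() calls instead of
 * a scan of the whole bitmap.
 */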
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);
        return irq;
}
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
        set_bit(irq, vcpu->irq_pending);
        set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
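/*
 * CLGI/STGI toggle the Global Interrupt Flag: between clgi() and stgi()
 * the processor holds off physical interrupts, which brackets the world
 * switch in svm_vcpu_run() below.
 */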
static inline void clgi(void)
{
        asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
        asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}
static inline unsigned long kvm_read_cr2(void)
{
        unsigned long cr2;

        asm volatile ("mov %%cr2, %0" : "=r" (cr2));
        return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
        asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
        unsigned long dr6;

        asm volatile ("mov %%dr6, %0" : "=r" (dr6));
        return dr6;
}

static inline void write_dr6(unsigned long val)
{
        asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
        unsigned long dr7;

        asm volatile ("mov %%dr7, %0" : "=r" (dr7));
        return dr7;
}

static inline void write_dr7(unsigned long val)
{
        asm volatile ("mov %0, %%dr7" :: "r" (val));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
        vcpu->shadow_efer = efer;
}
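/*
 * event_inj follows the VMCB EVENTINJ encoding: the vector in the low
 * eight bits, an event-type field, a valid bit and an error-code-valid
 * bit, with the error code itself in event_inj_err.
 */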
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}
static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}
static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
                return;
        }
        if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
                       __FUNCTION__, svm->vmcb->save.rip, svm->next_rip);

        vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

        vcpu->interrupt_window_open = 1;
}
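/*
 * SVM is advertised in CPUID function 0x80000001, ECX bit
 * SVM_CPUID_FEATURE_SHIFT (bit 2 on AMD parts); CPUID 0x8000000a
 * (SVM_CPUID_FUNC) then reports the number of ASIDs (EBX) and the
 * feature bits (EDX) tested by svm_has().
 */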
static int has_svm(void)
{
        uint32_t eax, ebx, ecx, edx;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                printk(KERN_INFO "has_svm: not amd\n");
                return 0;
        }

        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < SVM_CPUID_FUNC) {
                printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
                return 0;
        }

        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
                printk(KERN_DEBUG "has_svm: svm not available\n");
                return 0;
        }
        return 1;
}
static void svm_hardware_disable(void *garbage)
{
        struct svm_cpu_data *svm_data
                = per_cpu(svm_data, raw_smp_processor_id());

        if (svm_data) {
                uint64_t efer;

                wrmsrl(MSR_VM_HSAVE_PA, 0);
                rdmsrl(MSR_EFER, efer);
                wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
                per_cpu(svm_data, raw_smp_processor_id()) = NULL;
                __free_page(svm_data->save_area);
                kfree(svm_data);
        }
}
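/*
 * Enabling SVM on a cpu means setting EFER.SVME and pointing
 * MSR_VM_HSAVE_PA at a page where the processor spills host state
 * across VMRUN/#VMEXIT; the per-cpu save_area page allocated in
 * svm_cpu_init() serves that role.
 */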
static void svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *svm_data;
        uint64_t efer;
        struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        if (!has_svm()) {
                printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
                return;
        }
        svm_data = per_cpu(svm_data, me);

        if (!svm_data) {
                printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
                       me);
                return;
        }

        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;
        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        asm volatile ("sgdt %0" : "=m"(gdt_descr));
        gdt = (struct desc_struct *)gdt_descr.address;
        svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

        wrmsrl(MSR_VM_HSAVE_PA,
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *svm_data;
        int r;

        svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!svm_data)
                return -ENOMEM;
        svm_data->cpu = cpu;
        svm_data->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!svm_data->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = svm_data;

        return 0;

err_1:
        kfree(svm_data);
        return r;
}
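/*
 * MSR permission map layout: two bits per MSR, bit 0 of the pair
 * intercepting reads and bit 1 intercepting writes. msr_offset below is
 * counted in bits, so "/ 32" picks the u32 word and "% 32" the bit pair
 * within it; passing read=1/write=1 clears both bits, i.e. lets the
 * guest access the MSR without a #VMEXIT.
 */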
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}
static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        struct page *msrpm_pages;
        void *iopm_va, *msrpm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

        r = -ENOMEM;
        if (!msrpm_pages)
                goto err_1;

        msrpm_va = page_address(msrpm_pages);
        memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
        msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
        set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

        for_each_online_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err_2;
        }
        return 0;

err_2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
        msrpm_base = 0;
err_1:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}
static __exit void svm_hardware_unsetup(void)
{
        __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = msrpm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}
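/*
 * The VMCB has two halves: the control area (intercept masks, permission
 * map addresses, TSC offset, virtual interrupt state) and the save area
 * (guest register state). init_vmcb() programs the intercepts and puts
 * the save area into x86 reset state: CS 0xf000 with RIP 0xfff0.
 */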
static void init_vmcb(struct vmcb *vmcb)
{
        struct vmcb_control_area *control = &vmcb->control;
        struct vmcb_save_area *save = &vmcb->save;

        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;

        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;

        control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK;

        control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR);

        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                /*
                 * selective cr0 intercept bug?
                 *      0:   0f 22 d8                mov    %eax,%cr3
                 *      3:   0f 20 c0                mov    %cr0,%eax
                 *      6:   0d 00 00 00 80          or     $0x80000000,%eax
                 *      b:   0f 22 c0                mov    %eax,%cr0
                 * set cr3 ->interception
                 * get cr0 ->interception
                 * set cr0 -> no interception
                 */
                /*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_INVD) |
                                (1ULL << INTERCEPT_HLT) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
                                (1ULL << INTERCEPT_TASK_SWITCH) |
                                (1ULL << INTERCEPT_SHUTDOWN) |
                                (1ULL << INTERCEPT_VMRUN) |
                                (1ULL << INTERCEPT_VMMCALL) |
                                (1ULL << INTERCEPT_VMLOAD) |
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
                                (1ULL << INTERCEPT_SKINIT) |
                                (1ULL << INTERCEPT_WBINVD) |
                                (1ULL << INTERCEPT_MONITOR) |
                                (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = msrpm_base;
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = MSR_EFER_SVME_MASK;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;

        /*
         * cr0 val on cpu init should be 0x60000010, we enable cpu
         * cache by default. the orderly way is to enable cache in bios.
         */
        save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
}
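/*
 * Application processors are reset to their SIPI vector rather than to
 * the BSP's 0xf000:0xfff0: vector V yields CS selector V<<8 with base
 * V<<12 and RIP 0, matching real-mode startup-IPI semantics.
 */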
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm->vmcb);

        if (vcpu->vcpu_id != 0) {
                svm->vmcb->save.rip = 0;
                svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
        }

        return 0;
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                goto uninit;

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        memset(svm->db_regs, 0, sizeof(svm->db_regs));
        init_vmcb(svm->vmcb);

        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
        svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (svm->vcpu.vcpu_id == 0)
                svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 tsc_this, delta;

                /*
                 * Make sure that the guest sees a monotonically
                 * increasing TSC.
                 */
                rdtscll(tsc_this);
                delta = vcpu->host_tsc - tsc_this;
                svm->vmcb->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_apic_timer(vcpu);
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        rdtscll(vcpu->host_tsc);
}
static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->rip = svm->vmcb->save.rip;
}
static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->rip;
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
        var->unusable = !var->present;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.idtr.limit;
        dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->limit;
        svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.gdtr.limit;
        dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->limit;
        svm->vmcb->save.gdtr.base = dt->base;
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->shadow_efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->shadow_efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }

        vcpu->cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vcpu->cr4 = cr4;
        to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}
/* FIXME:

        svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        return -EOPNOTSUPP;
}
static int svm_get_irq(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_int_info = svm->vmcb->control.exit_int_info;

        if (is_external_interrupt(exit_int_info))
                return exit_int_info & SVM_EVTINJ_VEC_MASK;
        return -1;
}
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
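/*
 * ASID handling: every guest runs with a nonzero ASID so its TLB
 * entries are tagged and need not be flushed on every world switch.
 * When the per-cpu ASID space is exhausted, bump asid_generation,
 * restart at 1 and ask the hardware for a full flush; force_new_asid()
 * simply invalidates the vcpu's generation so the next pre_svm_run()
 * takes this path.
 */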
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
        if (svm_data->next_asid > svm_data->max_asid) {
                ++svm_data->asid_generation;
                svm_data->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->vcpu.cpu = svm_data->cpu;
        svm->asid_generation = svm_data->asid_generation;
        svm->vmcb->control.asid = svm_data->next_asid++;
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
        return to_svm(vcpu)->db_regs[dr];
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        *exception = 0;

        if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
                svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
                svm->vmcb->save.dr6 |= DR6_BD_MASK;
                *exception = DB_VECTOR;
                return;
        }

        switch (dr) {
        case 0 ... 3:
                svm->db_regs[dr] = value;
                return;
        case 4 ... 5:
                if (vcpu->cr4 & X86_CR4_DE) {
                        *exception = UD_VECTOR;
                        return;
                }
        case 7:
                if (value & ~((1ULL << 32) - 1)) {
                        *exception = GP_VECTOR;
                        return;
                }
                svm->vmcb->save.dr7 = value;
                return;
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __FUNCTION__, dr);
                *exception = UD_VECTOR;
                return;
        }
}
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 exit_int_info = svm->vmcb->control.exit_int_info;
        struct kvm *kvm = svm->vcpu.kvm;
        u64 fault_address;
        u32 error_code;

        if (!irqchip_in_kernel(kvm) &&
            is_external_interrupt(exit_int_info))
                push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

        fault_address = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}
static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        int er;

        er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);

        return 1;
}
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        if (!(svm->vcpu.cr0 & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;

        return 1;
}
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm->vmcb);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, down, in, string, rep;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu,
                                        kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
        rep = (io_info & SVM_IOIO_REP_MASK) != 0;
        down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

        return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}
static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        return 1;
}
static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}
static int invalid_op_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}
static int task_switch_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
{
        pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        return 0;
}
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = svm->vmcb->save.rip + 2;
        kvm_emulate_cpuid(&svm->vcpu);
        return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
                pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
        return 1;
}
static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
        if (irqchip_in_kernel(svm->vcpu.kvm))
                return 1;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                *data = svm->vmcb->control.tsc_offset + tsc;
                break;
        }
        case MSR_K6_STAR:
                *data = svm->vmcb->save.star;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                *data = svm->vmcb->save.lstar;
                break;
        case MSR_CSTAR:
                *data = svm->vmcb->save.cstar;
                break;
        case MSR_KERNEL_GS_BASE:
                *data = svm->vmcb->save.kernel_gs_base;
                break;
        case MSR_SYSCALL_MASK:
                *data = svm->vmcb->save.sfmask;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                *data = svm->vmcb->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
                *data = svm->vmcb->save.sysenter_eip;
                break;
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->vmcb->save.sysenter_esp;
                break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
        return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        u64 data;

        if (svm_get_msr(&svm->vcpu, ecx, &data))
                kvm_inject_gp(&svm->vcpu, 0);
        else {
                svm->vmcb->save.rax = data & 0xffffffff;
                svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
                svm->next_rip = svm->vmcb->save.rip + 2;
                skip_emulated_instruction(&svm->vcpu);
        }
        return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
        case MSR_K6_STAR:
                svm->vmcb->save.star = data;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                svm->vmcb->save.lstar = data;
                break;
        case MSR_CSTAR:
                svm->vmcb->save.cstar = data;
                break;
        case MSR_KERNEL_GS_BASE:
                svm->vmcb->save.kernel_gs_base = data;
                break;
        case MSR_SYSCALL_MASK:
                svm->vmcb->save.sfmask = data;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                svm->vmcb->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
                svm->vmcb->save.sysenter_eip = data;
                break;
        case MSR_IA32_SYSENTER_ESP:
                svm->vmcb->save.sysenter_esp = data;
                break;
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
                /*
                 * only support writing 0 to the performance counters for now
                 * to make Windows happy. Should be replaced by a real
                 * performance counter emulation later.
                 */
                break;
        default:
                return kvm_set_msr_common(vcpu, ecx, data);
        }
        return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        u64 data = (svm->vmcb->save.rax & -1u)
                | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
        svm->next_rip = svm->vmcb->save.rip + 2;
        if (svm_set_msr(&svm->vcpu, ecx, data))
                kvm_inject_gp(&svm->vcpu, 0);
        else
                skip_emulated_instruction(&svm->vcpu);
        return 1;
}
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        if (svm->vmcb->control.exit_info_1)
                return wrmsr_interception(svm, kvm_run);
        else
                return rdmsr_interception(svm, kvm_run);
}
static int interrupt_window_interception(struct vcpu_svm *svm,
                                         struct kvm_run *kvm_run)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
         */
        if (kvm_run->request_interrupt_window &&
            !svm->vcpu.irq_summary) {
                ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }

        return 1;
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
                                  struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0] = emulate_on_interception,
        [SVM_EXIT_READ_CR3] = emulate_on_interception,
        [SVM_EXIT_READ_CR4] = emulate_on_interception,
        [SVM_EXIT_READ_CR8] = emulate_on_interception,
        /* for now: */
        [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
        [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
        [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
        [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
        [SVM_EXIT_READ_DR0] = emulate_on_interception,
        [SVM_EXIT_READ_DR1] = emulate_on_interception,
        [SVM_EXIT_READ_DR2] = emulate_on_interception,
        [SVM_EXIT_READ_DR3] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
        [SVM_EXIT_INTR] = nop_on_interception,
        [SVM_EXIT_NMI] = nop_on_interception,
        [SVM_EXIT_SMI] = nop_on_interception,
        [SVM_EXIT_INIT] = nop_on_interception,
        [SVM_EXIT_VINTR] = interrupt_window_interception,
        /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
        [SVM_EXIT_CPUID] = cpuid_interception,
        [SVM_EXIT_INVD] = emulate_on_interception,
        [SVM_EXIT_HLT] = halt_interception,
        [SVM_EXIT_INVLPG] = emulate_on_interception,
        [SVM_EXIT_INVLPGA] = invalid_op_interception,
        [SVM_EXIT_IOIO] = io_interception,
        [SVM_EXIT_MSR] = msr_interception,
        [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
        [SVM_EXIT_SHUTDOWN] = shutdown_interception,
        [SVM_EXIT_VMRUN] = invalid_op_interception,
        [SVM_EXIT_VMMCALL] = vmmcall_interception,
        [SVM_EXIT_VMLOAD] = invalid_op_interception,
        [SVM_EXIT_VMSAVE] = invalid_op_interception,
        [SVM_EXIT_STGI] = invalid_op_interception,
        [SVM_EXIT_CLGI] = invalid_op_interception,
        [SVM_EXIT_SKINIT] = invalid_op_interception,
        [SVM_EXIT_WBINVD] = emulate_on_interception,
        [SVM_EXIT_MONITOR] = invalid_op_interception,
        [SVM_EXIT_MWAIT] = invalid_op_interception,
};
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_code = svm->vmcb->control.exit_code;

        kvm_reput_irq(svm);

        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
                return 0;
        }

        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
                printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
                       "exit_code 0x%x\n",
                       __FUNCTION__, svm->vmcb->control.exit_int_info,
                       exit_code);

        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_code;
                return 0;
        }

        return svm_exit_handlers[exit_code](svm, kvm_run);
}
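/*
 * ltr only accepts an "available" TSS descriptor and marks it busy, so
 * before TR can be reloaded after a world switch the descriptor type
 * must be reset from busy to available (type 9) by hand.
 */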
static void reload_tss(struct kvm_vcpu *vcpu)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
        svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        if (svm->vcpu.cpu != cpu ||
            svm->asid_generation != svm_data->asid_generation)
                new_asid(svm, svm_data);
}
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
        struct vmcb_control_area *control;

        control = &svm->vmcb->control;
        control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm_inject_irq(svm, irq);
}
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int intr_vector = -1;

        if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
            ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
                intr_vector = vmcb->control.exit_int_info &
                              SVM_EVTINJ_VEC_MASK;
                vmcb->control.exit_int_info = 0;
                svm_inject_irq(svm, intr_vector);
                return;
        }

        if (vmcb->control.int_ctl & V_IRQ_MASK)
                return;

        if (!kvm_cpu_has_interrupt(vcpu))
                return;

        if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
            (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
            (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
                /* unable to deliver irq, set pending irq */
                vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
                svm_inject_irq(svm, 0x0);
                return;
        }
        /* Okay, we can deliver the interrupt: grab it and update PIC state. */
        intr_vector = kvm_cpu_get_interrupt(vcpu);
        svm_inject_irq(svm, intr_vector);
        kvm_timer_intr_post(vcpu, intr_vector);
}
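/*
 * kvm_reput_irq() undoes an injection that never reached the guest: if
 * V_IRQ is still set at #VMEXIT and the PIC lives in userspace, the
 * vector is pushed back onto the pending-irq bitmap so it is not lost.
 */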
static void kvm_reput_irq(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;

        if ((control->int_ctl & V_IRQ_MASK)
            && !irqchip_in_kernel(svm->vcpu.kvm)) {
                control->int_ctl &= ~V_IRQ_MASK;
                push_irq(&svm->vcpu, control->int_vector);
        }

        svm->vcpu.interrupt_window_open =
                !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
static void svm_do_inject_vector(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);
        svm_inject_irq(svm, irq);
}
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                  struct kvm_run *kvm_run)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;

        svm->vcpu.interrupt_window_open =
                (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
                 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

        if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
                /*
                 * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
                svm_do_inject_vector(svm);

        /*
         * Interrupts blocked.  Wait for unblock.
         */
        if (!svm->vcpu.interrupt_window_open &&
            (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
                control->intercept |= 1ULL << INTERCEPT_VINTR;
        else
                control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
        return 0;
}
static void save_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
        asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
        asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
        asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
        asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
        asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
        asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
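/*
 * The world switch below: save host cr2/dr6/dr7 and segment selectors,
 * hold off physical interrupts with CLGI, move the guest GPRs that
 * VMRUN does not handle into place, then VMLOAD + VMRUN + VMSAVE with
 * RAX pointing at the VMCB. VMRUN itself swaps only a subset of state
 * (rip/rsp/rax, segments, control registers), which is why the asm has
 * to shuttle the remaining registers by hand.
 */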
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;

        pre_svm_run(svm);

        save_host_msrs(vcpu);
        fs_selector = read_fs();
        gs_selector = read_gs();
        ldt_selector = read_ldt();
        svm->host_cr2 = kvm_read_cr2();
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
        svm->vmcb->save.cr2 = vcpu->cr2;

        if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
                save_db_regs(svm->host_db_regs);
                load_db_regs(svm->db_regs);
        }

        clgi();

        local_irq_enable();

        asm volatile (
#ifdef CONFIG_X86_64
                "push %%rbp; \n\t"
#else
                "push %%ebp; \n\t"
#endif

#ifdef CONFIG_X86_64
                "mov %c[rbx](%[svm]), %%rbx \n\t"
                "mov %c[rcx](%[svm]), %%rcx \n\t"
                "mov %c[rdx](%[svm]), %%rdx \n\t"
                "mov %c[rsi](%[svm]), %%rsi \n\t"
                "mov %c[rdi](%[svm]), %%rdi \n\t"
                "mov %c[rbp](%[svm]), %%rbp \n\t"
                "mov %c[r8](%[svm]), %%r8 \n\t"
                "mov %c[r9](%[svm]), %%r9 \n\t"
                "mov %c[r10](%[svm]), %%r10 \n\t"
                "mov %c[r11](%[svm]), %%r11 \n\t"
                "mov %c[r12](%[svm]), %%r12 \n\t"
                "mov %c[r13](%[svm]), %%r13 \n\t"
                "mov %c[r14](%[svm]), %%r14 \n\t"
                "mov %c[r15](%[svm]), %%r15 \n\t"
#else
                "mov %c[rbx](%[svm]), %%ebx \n\t"
                "mov %c[rcx](%[svm]), %%ecx \n\t"
                "mov %c[rdx](%[svm]), %%edx \n\t"
                "mov %c[rsi](%[svm]), %%esi \n\t"
                "mov %c[rdi](%[svm]), %%edi \n\t"
                "mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
                /* Enter guest mode */
                "push %%rax \n\t"
                "mov %c[vmcb](%[svm]), %%rax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
                "pop %%rax \n\t"
#else
                /* Enter guest mode */
                "push %%eax \n\t"
                "mov %c[vmcb](%[svm]), %%eax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
                "pop %%eax \n\t"
#endif

                /* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
                "mov %%rbx, %c[rbx](%[svm]) \n\t"
                "mov %%rcx, %c[rcx](%[svm]) \n\t"
                "mov %%rdx, %c[rdx](%[svm]) \n\t"
                "mov %%rsi, %c[rsi](%[svm]) \n\t"
                "mov %%rdi, %c[rdi](%[svm]) \n\t"
                "mov %%rbp, %c[rbp](%[svm]) \n\t"
                "mov %%r8, %c[r8](%[svm]) \n\t"
                "mov %%r9, %c[r9](%[svm]) \n\t"
                "mov %%r10, %c[r10](%[svm]) \n\t"
                "mov %%r11, %c[r11](%[svm]) \n\t"
                "mov %%r12, %c[r12](%[svm]) \n\t"
                "mov %%r13, %c[r13](%[svm]) \n\t"
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"

                "pop %%rbp; \n\t"
#else
                "mov %%ebx, %c[rbx](%[svm]) \n\t"
                "mov %%ecx, %c[rcx](%[svm]) \n\t"
                "mov %%edx, %c[rdx](%[svm]) \n\t"
                "mov %%esi, %c[rsi](%[svm]) \n\t"
                "mov %%edi, %c[rdi](%[svm]) \n\t"
                "mov %%ebp, %c[rbp](%[svm]) \n\t"

                "pop %%ebp; \n\t"
#endif
                :
                : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
                  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
                  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
                  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
                  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
                  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
                  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
                  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
                  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
                  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
                  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
                  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
                  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
                  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
                  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
                : "cc", "memory"
#ifdef CONFIG_X86_64
                , "rbx", "rcx", "rdx", "rsi", "rdi"
                , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
                , "ebx", "ecx", "edx" , "esi", "edi"
#endif
                );

        if ((svm->vmcb->save.dr7 & 0xff))
                load_db_regs(svm->host_db_regs);

        vcpu->cr2 = svm->vmcb->save.cr2;

        write_dr6(svm->host_dr6);
        write_dr7(svm->host_dr7);
        kvm_write_cr2(svm->host_cr2);

        load_fs(fs_selector);
        load_gs(gs_selector);
        load_ldt(ldt_selector);
        load_host_msrs(vcpu);

        reload_tss(vcpu);

        local_irq_disable();

        stgi();

        svm->next_rip = 0;
}
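/*
 * Lazy FPU switching: whenever the guest gets a fresh cr3 the #NM
 * intercept is re-armed and CR0.TS is set, so the first guest FPU use
 * traps to nm_interception(), which hands the FPU back and clears TS.
 */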
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);

        if (vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                svm->vmcb->save.cr0 |= X86_CR0_TS;
                vcpu->fpu_active = 0;
        }
}
static int is_disabled(void)
{
        u64 vm_cr;

        rdmsrl(MSR_VM_CR, vm_cr);
        if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
                return 1;

        return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
}
static void svm_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
        .vcpu_decache = svm_vcpu_decache,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .get_dr = svm_get_dr,
        .set_dr = svm_set_dr,
        .cache_regs = svm_cache_regs,
        .decache_regs = svm_decache_regs,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .patch_hypercall = svm_patch_hypercall,
        .get_irq = svm_get_irq,
        .set_irq = svm_set_irq,
        .queue_exception = svm_queue_exception,
        .exception_injected = svm_exception_injected,
        .inject_pending_irq = svm_intr_assist,
        .inject_pending_vectors = do_interrupt_requests,

        .set_tss_addr = svm_set_tss_addr,
};
static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                        THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)