/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "kvm.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <linux/reboot.h>
#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include "x86_emulate.h"
#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

struct kvm_arch_ops *kvm_arch_ops;
struct kvm_stat kvm_stat;
EXPORT_SYMBOL_GPL(kvm_stat);
static struct kvm_stats_debugfs_item {
        const char *name;
        u32 *data;
        struct dentry *dentry;
} debugfs_entries[] = {
        { "pf_fixed", &kvm_stat.pf_fixed },
        { "pf_guest", &kvm_stat.pf_guest },
        { "tlb_flush", &kvm_stat.tlb_flush },
        { "invlpg", &kvm_stat.invlpg },
        { "exits", &kvm_stat.exits },
        { "io_exits", &kvm_stat.io_exits },
        { "mmio_exits", &kvm_stat.mmio_exits },
        { "signal_exits", &kvm_stat.signal_exits },
        { "irq_window", &kvm_stat.irq_window_exits },
        { "halt_exits", &kvm_stat.halt_exits },
        { "request_irq", &kvm_stat.request_irq_exits },
        { "irq_exits", &kvm_stat.irq_exits },
        { NULL, NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
// LDT or TSS descriptor in the GDT. 16 bytes.
struct segment_descriptor_64 {
        struct segment_descriptor s;
        u32 base_higher;
        u32 pad_zero;
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct segment_descriptor *d;
        unsigned long table_base;
        typedef unsigned long ul;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm ("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm ("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
        v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
        if (d->system == 0
            && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);
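
/*
 * Worked example: a 32-bit descriptor keeps its base split in three
 * fields.  For a base of 0x12345678, base_low = 0x5678, base_mid = 0x34
 * and base_high = 0x12, so the expression above reassembles
 * 0x5678 | (0x34 << 16) | (0x12 << 24) = 0x12345678.  On x86_64 the
 * system descriptors handled above (LDT and 64-bit TSS, types 2, 9
 * and 11) are 16 bytes wide and carry bits 63:32 of the base in
 * base_higher, hence the extra merge.
 */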
static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
                   void *dest)
{
        unsigned char *host_buf = dest;
        unsigned long req_size = size;

        while (size) {
                hpa_t paddr;
                unsigned now;
                unsigned offset;
                hva_t guest_buf;

                paddr = gva_to_hpa(vcpu, addr);

                if (is_error_hpa(paddr))
                        break;

                guest_buf = (hva_t)kmap_atomic(
                                        pfn_to_page(paddr >> PAGE_SHIFT),
                                        KM_USER0);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = min(size, PAGE_SIZE - offset);
                memcpy(host_buf, (void*)guest_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
                kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
        }
        return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
                    void *data)
{
        unsigned char *host_buf = data;
        unsigned long req_size = size;

        while (size) {
                hpa_t paddr;
                unsigned now;
                unsigned offset;
                hva_t guest_buf;

                paddr = gva_to_hpa(vcpu, addr);

                if (is_error_hpa(paddr))
                        break;

                guest_buf = (hva_t)kmap_atomic(
                                pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = min(size, PAGE_SIZE - offset);
                memcpy((void*)guest_buf, host_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
                kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
        }
        return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

static int vcpu_slot(struct kvm_vcpu *vcpu)
{
        return vcpu - vcpu->kvm->vcpus;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
{
        struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];

        mutex_lock(&vcpu->mutex);
        if (unlikely(!vcpu->vmcs)) {
                mutex_unlock(&vcpu->mutex);
                return NULL;
        }
        return kvm_arch_ops->vcpu_load(vcpu);
}
static void vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->vcpu_put(vcpu);
        mutex_unlock(&vcpu->mutex);
}
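
/*
 * vcpu_load()/vcpu_put() bracket every access to guest state.  A
 * typical caller looks like this (sketch):
 *
 *	vcpu = vcpu_load(kvm, n);
 *	if (!vcpu)
 *		return -ENOENT;
 *	... operate on vcpu ...
 *	vcpu_put(vcpu);
 *
 * The per-vcpu mutex taken in vcpu_load() is also what
 * decache_vcpus_on_cpu() below relies on to tell whether a vcpu is
 * currently running on some cpu.
 */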
static int kvm_dev_open(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        int i;

        if (!kvm)
                return -ENOMEM;

        spin_lock_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];

                mutex_init(&vcpu->mutex);
                vcpu->cpu = -1;
                vcpu->kvm = kvm;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                INIT_LIST_HEAD(&vcpu->free_pages);
        }
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);

        filp->private_data = kvm;
        return 0;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->phys_mem != dont->phys_mem)
                if (free->phys_mem) {
                        for (i = 0; i < free->npages; ++i)
                                if (free->phys_mem[i])
                                        __free_page(free->phys_mem[i]);
                        vfree(free->phys_mem);
                }

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->phys_mem = NULL;
        free->npages = 0;
        free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
                return;

        kvm_mmu_destroy(vcpu);
        vcpu_put(vcpu);
        kvm_arch_ops->vcpu_free(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
        return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->inject_gp(vcpu, 0);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        u64 pdpte;
        u64 *pdpt;
        int ret;
        struct kvm_memory_slot *memslot;

        spin_lock(&vcpu->kvm->lock);
        memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
        /* FIXME: !memslot - emulate? 0xff? */
        pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);

        ret = 1;
        for (i = 0; i < 4; ++i) {
                pdpte = pdpt[offset + i];
                if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
                        ret = 0;
                        goto out;
                }
        }

        for (i = 0; i < 4; ++i)
                vcpu->pdptrs[i] = pdpt[offset + i];

out:
        kunmap_atomic(pdpt, KM_USER0);
        spin_unlock(&vcpu->kvm->lock);

        return ret;
}
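
/*
 * Note on the check above: a PDPTE is rejected when its present bit
 * (bit 0) is set together with any bit in 0xfffffff0000001e6.  That
 * mask covers the bits reserved in PAE page-directory-pointer
 * entries - bits 1:2, 5:8 and the high physical-address bits - which
 * the guest must leave zero.
 */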
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->cr0);
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                inject_gp(vcpu);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
                if ((vcpu->shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                inject_gp(vcpu);
                                return;
                        }
                        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                inject_gp(vcpu);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        }

        kvm_arch_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;

        spin_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        spin_unlock(&vcpu->kvm->lock);
        return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & CR4_PAE_MASK)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        inject_gp(vcpu);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
                   && !load_pdptrs(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
        }

        if (cr4 & CR4_VMXE_MASK) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                inject_gp(vcpu);
                return;
        }
        kvm_arch_ops->set_cr4(vcpu, cr4);
        spin_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESEVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        } else {
                if (cr3 & CR3_RESEVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
                if (is_paging(vcpu) && is_pae(vcpu) &&
                    !load_pdptrs(vcpu, cr3)) {
                        printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        }

        vcpu->cr3 = cr3;
        spin_lock(&vcpu->kvm->lock);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                inject_gp(vcpu);
        else
                vcpu->mmu.new_cr3(vcpu);
        spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                inject_gp(vcpu);
                return;
        }
        vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);
void fx_init(struct kvm_vcpu *vcpu)
{
        struct __attribute__ ((__packed__)) fx_image_s {
                u16 control; //fcw
                u16 status; //fsw
                u16 tag; // ftw
                u16 opcode; //fop
                u64 ip; // fpu ip
                u64 operand;// fpu dp
                u32 mxcsr;
                u32 mxcsr_mask;

        } *fx_image;

        fx_save(vcpu->host_fx_image);
        fpu_init();
        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);

        fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
        fx_image->mxcsr = 0x1f80;
        memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
               0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);
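
/*
 * Layout note: host_fx_image and guest_fx_image are the buffers that
 * fx_save()/fx_restore() operate on, and the packed fx_image_s above
 * simply names the leading bytes of that image (fcw, fsw, ftw, fop,
 * fpu ip, fpu dp, mxcsr, mxcsr_mask).  0x1f80 is the architectural
 * reset value of MXCSR: all SSE exceptions masked, round-to-nearest,
 * status flags clear.
 */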
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        r = -EINVAL;
        if (!valid_vcpu(n))
                goto out;

        vcpu = &kvm->vcpus[n];

        mutex_lock(&vcpu->mutex);

        if (vcpu->vmcs) {
                mutex_unlock(&vcpu->mutex);
                return -EEXIST;
        }

        vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
                                           FX_IMAGE_ALIGN);
        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

        r = kvm_arch_ops->vcpu_create(vcpu);
        if (r < 0)
                goto out_free_vcpus;

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto out_free_vcpus;

        kvm_arch_ops->vcpu_load(vcpu);
        r = kvm_mmu_setup(vcpu);
        if (r >= 0)
                r = kvm_arch_ops->vcpu_setup(vcpu);
        vcpu_put(vcpu);

        if (r < 0)
                goto out_free_vcpus;

        return 0;

out_free_vcpus:
        kvm_free_vcpu(vcpu);
        mutex_unlock(&vcpu->mutex);
out:
        return r;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
                                           struct kvm_memory_region *mem)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
        int memory_config_version;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
        spin_lock(&kvm->lock);

        memory_config_version = kvm->memory_config_version;
        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_unlock;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_unlock;
        }
        /*
         * Do memory allocations outside lock.  memory_config_version will
         * detect any races.
         */
        spin_unlock(&kvm->lock);

        /* Deallocate if slot is being removed */
        if (!npages)
                new.phys_mem = NULL;

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.phys_mem) {
                new.phys_mem = vmalloc(npages * sizeof(struct page *));

                if (!new.phys_mem)
                        goto out_free;

                memset(new.phys_mem, 0, npages * sizeof(struct page *));
                for (i = 0; i < npages; ++i) {
                        new.phys_mem[i] = alloc_page(GFP_HIGHUSER
                                                     | __GFP_ZERO);
                        if (!new.phys_mem[i])
                                goto out_free;
                        new.phys_mem[i]->private = 0;
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        spin_lock(&kvm->lock);

        if (memory_config_version != kvm->memory_config_version) {
                spin_unlock(&kvm->lock);
                kvm_free_physmem_slot(&new, &old);
                goto raced;
        }

        r = -EAGAIN;
        if (kvm->busy)
                goto out_unlock;

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        ++kvm->memory_config_version;

        spin_unlock(&kvm->lock);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu;

                vcpu = vcpu_load(kvm, i);
                if (!vcpu)
                        continue;
                kvm_mmu_reset_context(vcpu);
                vcpu_put(vcpu);
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_unlock:
        spin_unlock(&kvm->lock);
out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
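
/*
 * Update protocol used above, in short: sample memory_config_version
 * under the lock, drop the lock to do the vmalloc/alloc_page work,
 * then retake the lock and compare the version.  If another ioctl
 * changed the slots in the meantime, the freshly allocated slot is
 * freed and the whole sequence restarts from the 'raced' label.
 */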
static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
        spin_lock(&vcpu->kvm->lock);
        kvm_mmu_slot_remove_write_access(vcpu, slot);
        spin_unlock(&vcpu->kvm->lock);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
                                       struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        int cleared;
        unsigned long any = 0;

        spin_lock(&kvm->lock);

        /*
         * Prevent changes to guest memory configuration even while the lock
         * is not taken.
         */
        ++kvm->busy;
        spin_unlock(&kvm->lock);
        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, 8) / 8;

        for (i = 0; !any && i < n; ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any) {
                cleared = 0;
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        struct kvm_vcpu *vcpu = vcpu_load(kvm, i);

                        if (!vcpu)
                                continue;
                        if (!cleared) {
                                do_remove_write_access(vcpu, log->slot);
                                memset(memslot->dirty_bitmap, 0, n);
                                cleared = 1;
                        }
                        kvm_arch_ops->tlb_flush(vcpu);
                        vcpu_put(vcpu);
                }
        }

        r = 0;

out:
        spin_lock(&kvm->lock);
        --kvm->busy;
        spin_unlock(&kvm->lock);
        return r;
}
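
/*
 * kvm->busy is bumped above so that kvm_dev_ioctl_set_memory_region()
 * fails with -EAGAIN while the bitmap is being harvested; this keeps
 * the memslot layout stable without holding kvm->lock across the
 * copy_to_user() call.
 */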
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memory_slot *memslot = NULL;
        unsigned long rel_gfn;

        for (i = 0; i < kvm->nmemslots; ++i) {
                memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages) {

                        if (!memslot || !memslot->dirty_bitmap)
                                return;

                        rel_gfn = gfn - memslot->base_gfn;

                        /* avoid RMW */
                        if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                                set_bit(rel_gfn, memslot->dirty_bitmap);
                        return;
                }
        }
}
static int emulator_read_std(unsigned long addr,
                             unsigned long *val,
                             unsigned int bytes,
                             struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        void *data = val;

        while (bytes) {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                unsigned long pfn;
                struct kvm_memory_slot *memslot;
                void *page;

                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
                pfn = gpa >> PAGE_SHIFT;
                memslot = gfn_to_memslot(vcpu->kvm, pfn);
                if (!memslot)
                        return X86EMUL_UNHANDLEABLE;
                page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);

                memcpy(data, page + offset, tocopy);

                kunmap_atomic(page, KM_USER0);

                bytes -= tocopy;
                data += tocopy;
                addr += tocopy;
        }

        return X86EMUL_CONTINUE;
}
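
/*
 * Note the loop structure above: a read may straddle a page boundary,
 * so each iteration translates the current gva, copies at most up to
 * the end of that page (tocopy), and advances addr/data until all
 * bytes are read or a translation fails.
 */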
static int emulator_write_std(unsigned long addr,
                              unsigned long val,
                              unsigned int bytes,
                              struct x86_emulate_ctxt *ctxt)
{
        printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
               addr, bytes);
        return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
                                  unsigned long *val,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
                vcpu->mmio_read_completed = 0;
                return X86EMUL_CONTINUE;
        } else if (emulator_read_std(addr, val, bytes, ctxt)
                   == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
        else {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
                vcpu->mmio_needed = 1;
                vcpu->mmio_phys_addr = gpa;
                vcpu->mmio_size = bytes;
                vcpu->mmio_is_write = 0;

                return X86EMUL_UNHANDLEABLE;
        }
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                               unsigned long val, int bytes)
{
        struct kvm_memory_slot *m;
        struct page *page;
        void *virt;

        if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
                return 0;
        m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!m)
                return 0;
        page = gfn_to_page(m, gpa >> PAGE_SHIFT);
        kvm_mmu_pre_write(vcpu, gpa, bytes);
        virt = kmap_atomic(page, KM_USER0);
        memcpy(virt + offset_in_page(gpa), &val, bytes);
        kunmap_atomic(virt, KM_USER0);
        kvm_mmu_post_write(vcpu, gpa, bytes);
        return 1;
}
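
/*
 * kvm_mmu_pre_write()/kvm_mmu_post_write() bracket the memcpy so the
 * shadow MMU can notice writes that touch guest page tables and keep
 * the shadow page tables coherent.  Writes that cross a page boundary
 * or miss every memslot return 0 and fall back to the MMIO path in
 * emulator_write_emulated() below.
 */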
static int emulator_write_emulated(unsigned long addr,
                                   unsigned long val,
                                   unsigned int bytes,
                                   struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;

        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 1;
        memcpy(vcpu->mmio_data, &val, bytes);

        return X86EMUL_CONTINUE;
}

static int emulator_cmpxchg_emulated(unsigned long addr,
                                     unsigned long old,
                                     unsigned long new,
                                     unsigned int bytes,
                                     struct x86_emulate_ctxt *ctxt)
{
        static int reported;

        if (!reported) {
                reported = 1;
                printk(KERN_WARNING "kvm: emulating exchange as write\n");
        }
        return emulator_write_emulated(addr, new, bytes, ctxt);
}

#ifdef CONFIG_X86_32

static int emulator_cmpxchg8b_emulated(unsigned long addr,
                                       unsigned long old_lo,
                                       unsigned long old_hi,
                                       unsigned long new_lo,
                                       unsigned long new_hi,
                                       struct x86_emulate_ctxt *ctxt)
{
        static int reported;
        int r;

        if (!reported) {
                reported = 1;
                printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
        }
        r = emulator_write_emulated(addr, new_lo, 4, ctxt);
        if (r != X86EMUL_CONTINUE)
                return r;
        return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
}

#endif

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        return kvm_arch_ops->get_segment_base(vcpu, seg);
}
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
        return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
        unsigned long cr0;

        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        cr0 = vcpu->cr0 & ~CR0_TS_MASK;
        kvm_arch_ops->set_cr0(vcpu, cr0);
        return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        switch (dr) {
        case 0 ... 3:
                *dest = kvm_arch_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __FUNCTION__, dr);
                return X86EMUL_UNHANDLEABLE;
        }
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;

        kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
        }
        return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
        static int reported;
        u8 opcodes[4];
        unsigned long rip = ctxt->vcpu->rip;
        unsigned long rip_linear;

        rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

        if (reported)
                return;

        emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

        printk(KERN_ERR "emulation failed but !mmio_needed?"
               " rip %lx %02x %02x %02x %02x\n",
               rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
}
struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
        .write_std           = emulator_write_std,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
#ifdef CONFIG_X86_32
        .cmpxchg8b_emulated  = emulator_cmpxchg8b_emulated,
#endif
};
int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
                        u16 error_code)
{
        struct x86_emulate_ctxt emulate_ctxt;
        int r;
        int cs_db, cs_l;

        kvm_arch_ops->cache_regs(vcpu);

        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

        emulate_ctxt.vcpu = vcpu;
        emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
        emulate_ctxt.cr2 = cr2;
        emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
                ? X86EMUL_MODE_REAL : cs_l
                ? X86EMUL_MODE_PROT64 : cs_db
                ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

        if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
                emulate_ctxt.cs_base = 0;
                emulate_ctxt.ds_base = 0;
                emulate_ctxt.es_base = 0;
                emulate_ctxt.ss_base = 0;
        } else {
                emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
                emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
                emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
                emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
        }

        emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
        emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

        vcpu->mmio_is_write = 0;
        r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

        if ((r || vcpu->mmio_is_write) && run) {
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
                run->mmio.len = vcpu->mmio_size;
                run->mmio.is_write = vcpu->mmio_is_write;
        }

        if (r) {
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                        return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
                        report_emulation_failure(&emulate_ctxt);
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }

        kvm_arch_ops->decache_regs(vcpu);
        kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

        if (vcpu->mmio_is_write)
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
        return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
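
/*
 * Example: mk_cr_64(0x80000021ULL, 0x31) keeps bits 63:32 of the old
 * value and replaces bits 31:0, yielding 0x31 since the old high half
 * was zero; with curr_cr = 0x500000000ULL the result would be
 * 0x500000031.
 */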
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
{
        lmsw(vcpu, msw);
        *rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
                return vcpu->cr0;
        case 2:
                return vcpu->cr2;
        case 3:
                return vcpu->cr3;
        case 4:
                return vcpu->cr4;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
                return 0;
        }
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
                     unsigned long *rflags)
{
        switch (cr) {
        case 0:
                set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
                *rflags = kvm_arch_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->cr2 = val;
                break;
        case 3:
                set_cr3(vcpu, val);
                break;
        case 4:
                set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
        }
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_PERF_STATUS:
                /* MTRR registers */
        case 0xfe:
        case 0x200 ... 0x2ff:
                data = 0;
                break;
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = vcpu->apic_base;
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->ia32_misc_enable_msr;
                break;
#ifdef CONFIG_X86_64
        case MSR_EFER:
                data = vcpu->shadow_efer;
                break;
#endif
        default:
                printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}
#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & EFER_RESERVED_BITS) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                inject_gp(vcpu);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                inject_gp(vcpu);
                return;
        }

        kvm_arch_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;

        vcpu->shadow_efer = efer;
}

#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
#endif
        case MSR_IA32_MC0_STATUS:
                printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                       __FUNCTION__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case 0x200 ... 0x2ff: /* MTRRs */
                break;
        case MSR_IA32_APICBASE:
                vcpu->apic_base = data;
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->ia32_misc_enable_msr = data;
                break;
        default:
                printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
        vcpu_put(vcpu);
        cond_resched();
        /* Cannot fail - no vcpu unplug yet. */
        vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_resched);

void load_msrs(struct vmx_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                wrmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(load_msrs);

void save_msrs(struct vmx_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                rdmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(save_msrs);
static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
{
        struct kvm_vcpu *vcpu;
        int r;

        if (!valid_vcpu(kvm_run->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, kvm_run->vcpu);
        if (!vcpu)
                return -ENOENT;

        /* re-sync apic's tpr */
        vcpu->cr8 = kvm_run->cr8;

        if (kvm_run->emulated) {
                kvm_arch_ops->skip_emulated_instruction(vcpu);
                kvm_run->emulated = 0;
        }

        if (kvm_run->mmio_completed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
        }

        vcpu->mmio_needed = 0;

        r = kvm_arch_ops->run(vcpu, kvm_run);

        vcpu_put(vcpu);
        return r;
}
static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
{
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(regs->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, regs->vcpu);
        if (!vcpu)
                return -ENOENT;

        kvm_arch_ops->cache_regs(vcpu);

        regs->rax = vcpu->regs[VCPU_REGS_RAX];
        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
        regs->rcx = vcpu->regs[VCPU_REGS_RCX];
        regs->rdx = vcpu->regs[VCPU_REGS_RDX];
        regs->rsi = vcpu->regs[VCPU_REGS_RSI];
        regs->rdi = vcpu->regs[VCPU_REGS_RDI];
        regs->rsp = vcpu->regs[VCPU_REGS_RSP];
        regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
        regs->r8 = vcpu->regs[VCPU_REGS_R8];
        regs->r9 = vcpu->regs[VCPU_REGS_R9];
        regs->r10 = vcpu->regs[VCPU_REGS_R10];
        regs->r11 = vcpu->regs[VCPU_REGS_R11];
        regs->r12 = vcpu->regs[VCPU_REGS_R12];
        regs->r13 = vcpu->regs[VCPU_REGS_R13];
        regs->r14 = vcpu->regs[VCPU_REGS_R14];
        regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

        regs->rip = vcpu->rip;
        regs->rflags = kvm_arch_ops->get_rflags(vcpu);

        /*
         * Don't leak debug flags in case they were set for guest debugging
         */
        if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
                regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
{
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(regs->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, regs->vcpu);
        if (!vcpu)
                return -ENOENT;

        vcpu->regs[VCPU_REGS_RAX] = regs->rax;
        vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
        vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
        vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
        vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
        vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
        vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
        vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
        vcpu->regs[VCPU_REGS_R8] = regs->r8;
        vcpu->regs[VCPU_REGS_R9] = regs->r9;
        vcpu->regs[VCPU_REGS_R10] = regs->r10;
        vcpu->regs[VCPU_REGS_R11] = regs->r11;
        vcpu->regs[VCPU_REGS_R12] = regs->r12;
        vcpu->regs[VCPU_REGS_R13] = regs->r13;
        vcpu->regs[VCPU_REGS_R14] = regs->r14;
        vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

        vcpu->rip = regs->rip;
        kvm_arch_ops->set_rflags(vcpu, regs->rflags);

        kvm_arch_ops->decache_regs(vcpu);

        vcpu_put(vcpu);

        return 0;
}
static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_arch_ops->get_segment(vcpu, var, seg);
}

static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
        struct kvm_vcpu *vcpu;
        struct descriptor_table dt;

        if (!valid_vcpu(sregs->vcpu))
                return -EINVAL;
        vcpu = vcpu_load(kvm, sregs->vcpu);
        if (!vcpu)
                return -ENOENT;

        get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        kvm_arch_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
        kvm_arch_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;

        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
        sregs->cr4 = vcpu->cr4;
        sregs->cr8 = vcpu->cr8;
        sregs->efer = vcpu->shadow_efer;
        sregs->apic_base = vcpu->apic_base;

        memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
               sizeof sregs->interrupt_bitmap);

        vcpu_put(vcpu);

        return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_arch_ops->set_segment(vcpu, var, seg);
}
static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
        struct kvm_vcpu *vcpu;
        int mmu_reset_needed = 0;
        int i;
        struct descriptor_table dt;

        if (!valid_vcpu(sregs->vcpu))
                return -EINVAL;
        vcpu = vcpu_load(kvm, sregs->vcpu);
        if (!vcpu)
                return -ENOENT;

        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_arch_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_arch_ops->set_gdt(vcpu, &dt);

        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
        vcpu->cr3 = sregs->cr3;

        vcpu->cr8 = sregs->cr8;

        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
        kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
        vcpu->apic_base = sregs->apic_base;

        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);

        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
        kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
               sizeof vcpu->irq_pending);
        vcpu->irq_summary = 0;
        for (i = 0; i < NR_IRQ_WORDS; ++i)
                if (vcpu->irq_pending[i])
                        __set_bit(i, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};
static __init void kvm_init_msr_list(void)
{
        u32 dummy[2];
        unsigned i, j;

        for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
                        continue;
                if (j < i)
                        msrs_to_save[j] = msrs_to_save[i];
                j++;
        }
        num_msrs_to_save = j;
}
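
/*
 * rdmsr_safe() traps the #GP a missing MSR would raise and returns a
 * negative value instead, so the loop above compacts msrs_to_save[]
 * in place, keeping only the MSRs this host actually implements.
 */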
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!valid_vcpu(msrs->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, msrs->vcpu);
        if (!vcpu)
                return -ENOENT;

        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(kvm, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}
/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        struct kvm_vcpu *vcpu;
        gpa_t gpa;

        vcpu = vcpu_load(kvm, tr->vcpu);
        if (!vcpu)
                return -ENOENT;
        spin_lock(&kvm->lock);
        gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        spin_unlock(&kvm->lock);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
{
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(irq->vcpu))
                return -EINVAL;
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        vcpu = vcpu_load(kvm, irq->vcpu);
        if (!vcpu)
                return -ENOENT;

        set_bit(irq->irq, vcpu->irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
                                     struct kvm_debug_guest *dbg)
{
        struct kvm_vcpu *vcpu;
        int r;

        if (!valid_vcpu(dbg->vcpu))
                return -EINVAL;
        vcpu = vcpu_load(kvm, dbg->vcpu);
        if (!vcpu)
                return -ENOENT;

        r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

        vcpu_put(vcpu);

        return r;
}
static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VCPU:
                r = kvm_dev_ioctl_create_vcpu(kvm, arg);
                if (r)
                        goto out;
                break;
        case KVM_RUN: {
                struct kvm_run kvm_run;

                r = -EFAULT;
                if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
                        goto out;
                r = kvm_dev_ioctl_run(kvm, &kvm_run);
                if (r < 0 && r != -EINTR)
                        goto out;
                if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
                        r = -EFAULT;
                        goto out;
                }
                break;
        }
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_dev_ioctl_translate(kvm, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_dev_ioctl_interrupt(kvm, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                        goto out;
                r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(kvm, argp, get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(kvm, argp, do_set_msr, 0);
                break;
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices
                                 + num_msrs_to_save * sizeof(u32),
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        default:
                ;
        }
out:
        return r;
}
static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int *type)
{
        struct kvm *kvm = vma->vm_file->private_data;
        unsigned long pgoff;
        struct kvm_memory_slot *slot;
        struct page *page;

        *type = VM_FAULT_MINOR;
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        slot = gfn_to_memslot(kvm, pgoff);
        if (!slot)
                return NOPAGE_SIGBUS;
        page = gfn_to_page(slot, pgoff);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);
        return page;
}

static struct vm_operations_struct kvm_dev_vm_ops = {
        .nopage = kvm_dev_nopage,
};

static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_dev_vm_ops;
        return 0;
}

static struct file_operations kvm_chardev_ops = {
        .open           = kvm_dev_open,
        .release        = kvm_dev_release,
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
        .mmap           = kvm_dev_mmap,
};

static struct miscdevice kvm_dev = {
        MISC_DYNAMIC_MINOR,
        "kvm",
        &kvm_chardev_ops,
};
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
        struct kvm *vm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(vm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = &vm->vcpus[i];
                        /*
                         * If the vcpu is locked, then it is running on some
                         * other cpu and therefore it is not cached on the
                         * cpu in question.
                         *
                         * If it's not locked, check the last cpu it executed
                         * on.
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
                                        kvm_arch_ops->vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
                        }
                }
        spin_unlock(&kvm_lock);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        switch (val) {
        case CPU_DOWN_PREPARE:
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                decache_vcpus_on_cpu(cpu);
                smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
                                         NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
                                         NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};
static __init void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
                                               p->data);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        decache_vcpus_on_cpu(raw_smp_processor_id());
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        set_kset_name("kvm"),
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
        int r;

        if (kvm_arch_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }

        if (!ops->cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: no hardware support\n");
                return -EOPNOTSUPP;
        }
        if (ops->disabled_by_bios()) {
                printk(KERN_ERR "kvm: disabled by bios\n");
                return -EOPNOTSUPP;
        }

        kvm_arch_ops = ops;

        r = kvm_arch_ops->hardware_setup();
        if (r < 0)
                return r;

        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        return r;

out_free:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        return r;
}

void kvm_exit_arch(void)
{
        misc_deregister(&kvm_dev);

        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        kvm_arch_ops = NULL;
}

static __init int kvm_init(void)
{
        static struct page *bad_page;
        int r = 0;

        kvm_init_debug();

        kvm_init_msr_list();

        if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
        memset(__va(bad_page_address), 0, PAGE_SIZE);

        return r;

out:
        kvm_exit_debug();
        return r;
}

static __exit void kvm_exit(void)
{
        kvm_exit_debug();
        __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);