/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/uaccess.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}
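/*
 * Ask every vcpu to flush its TLB on next entry: set KVM_REQ_TLB_FLUSH
 * in vcpu->requests, then IPI the CPUs currently running a vcpu so they
 * notice the request.  ack_flush above is deliberately empty; the smp
 * call itself provides the synchronization we need.
 */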
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}
static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
				goto out_free;
		}
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);
static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
*gfn_to_memslot(struct kvm
*kvm
, gfn_t gfn
)
514 gfn
= unalias_gfn(kvm
, gfn
);
515 return __gfn_to_memslot(kvm
, gfn
);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);
	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	struct page *page;
	void *page_virt;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}
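/*
 * Pages of the vcpu file mapping: offset 0 is the kvm_run structure
 * shared with userspace, KVM_PIO_PAGE_OFFSET is the PIO transfer page.
 */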
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}
static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}
static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
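/*
 * Mapping the VM file exposes guest memory directly: the page offset of
 * the fault is interpreted as a gfn and resolved through the memslots.
 */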
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}
static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
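/*
 * Creating a VM: allocate the kvm structure, wrap it in an anonymous
 * inode and hand the new file descriptor to userspace.  kvm->filp is
 * stashed so each vcpu file can take a reference on the VM file.
 */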
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
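/*
 * A minimal sketch of how userspace drives this interface (hypothetical
 * caller, error handling omitted):
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
 *	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
 *	long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu, 0);
 *	ioctl(vcpu, KVM_RUN, 0);
 */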
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};
static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
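/*
 * Hardware virtualization (VMX/SVM) is switched on per CPU; the
 * cpus_hardware_enabled mask remembers which CPUs have it enabled so
 * the hotplug, suspend and reboot paths enable or disable it exactly
 * once per CPU.
 */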
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}
static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}
void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}
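/*
 * MMIO/PIO dispatch: a bus is a flat array of devices, each of which
 * claims an address range via its in_range() callback; the first match
 * wins.
 */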
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}
void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
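/*
 * debugfs statistics: each kvm_stats_debugfs_item carries the offset of
 * a u32 counter inside struct kvm_vcpu; stat_get() sums that counter
 * over every vcpu of every VM on vm_list.
 */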
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}
static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}
static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}
static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};
static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
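/*
 * Preempt notifiers let a vcpu thread migrate between host CPUs while
 * its guest state follows along: sched_out saves state when the thread
 * is preempted, sched_in reloads it on the CPU the thread lands on.
 */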
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	r = kvm_arch_init(opaque);
	if (r)
		goto out3;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out3:
	kvm_mmu_module_exit();
out4:
	kvm_exit_debug();
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
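/*
 * Module teardown: undo everything kvm_init() set up, in reverse order.
 */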
void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
	kvm_mmu_module_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);