/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
 * Ordering of locks:
 *
 *		kvm->lock --> kvm->irq_lock
 */
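/*
 * Illustrative only (not from the original source): a path that needs
 * both mutexes must honour the ordering above, e.g.
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->lock);
 *
 * Taking them in the opposite order risks an AB-BA deadlock.
 */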
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

static bool largepages_enabled = true;
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}
/* This function implicitly holds kvm->lock via cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and then cancelled, or
	 * 2. the work callback was executed.
	 *
	 * The first case ensures that the irq is disabled and no more
	 * events will happen.  In the second case, however, the irq may
	 * still be enabled (e.g. for MSI), so we disable it here to
	 * prevent further events.
	 *
	 * Note this may result in a nested disable if the interrupt type
	 * is INTx, but that is fine since we are about to free it.
	 *
	 * If this function is part of VM destruction, please ensure the
	 * kvm state is still valid up to this point, since we may also
	 * have to wait for interrupt_work to finish.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}
static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}
static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}
#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}
#endif
static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		  "so it cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif
#endif /* KVM_CAP_DEVICE_ASSIGNMENT */
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}
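/*
 * Example (illustrative, not from the original source): for a pfn that
 * falls inside a compound page (e.g. hugetlbfs-backed guest memory),
 * compound_head() resolves a tail page to its head page first, so
 * PageReserved() is evaluated on the correct struct page.
 */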
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
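/*
 * Illustrative usage (not from the original source): ioctl handlers
 * bracket all accesses to vcpu state with this pair, e.g.
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *	vcpu_put(vcpu);
 */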
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
		cpumask_clear(cpus);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
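/*
 * Illustrative sketch (not from the original source): on the vcpu side,
 * arch code consumes a request bit before re-entering the guest,
 * roughly
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_this_vcpu_tlb();	(hypothetical helper name)
 */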
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
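/*
 * A minimal sketch (based on the arch page-fault code, not this file)
 * of how mmu_notifier_seq and mmu_notifier_count are consumed: the
 * fault path snapshots the sequence, resolves the pfn, then refuses to
 * install the spte under mmu_lock if a notifier ran in between:
 *
 *	seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
 *		goto retry;
 */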
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_LIST_HEAD(&kvm->irq_routing);
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages, ugfn;
	unsigned long largepages, i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
		    !largepages_enabled)
			for (i = 0; i < largepages; ++i)
				new.lpage_info[i].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);

	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
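/*
 * Worked example: for a 4096-page slot on a 64-bit host,
 * n = ALIGN(4096, 64) / 8 = 512 bytes of bitmap (one bit per page)
 * are copied out to userspace.
 */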
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
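/*
 * Example (illustrative): for a gfn backed by an mmap'ed PCI BAR,
 * get_user_pages_fast() fails and the pfn is derived from the
 * VM_PFNMAP vma instead; such MMIO pfns are never reference-counted
 * (see kvm_release_pfn_clean() below).
 */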
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
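/*
 * Illustrative usage: a read that crosses page boundaries is handled
 * transparently by the segment loop above, e.g.
 *
 *	struct some_guest_struct s;	(hypothetical type)
 *	if (kvm_read_guest(kvm, gpa, &s, sizeof(s)) < 0)
 *		return -EFAULT;
 */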
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if ((kvm_arch_interrupt_allowed(vcpu) &&
					kvm_cpu_has_interrupt(vcpu)) ||
				kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
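/*
 * Note (illustrative, not from this file): the wait queue above is
 * woken by whoever makes the vcpu runnable again, typically via
 * something like
 *
 *	if (waitqueue_active(&vcpu->wq))
 *		wake_up_interruptible(&vcpu->wq);
 */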
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						entry_nr->entry_nr,
						GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Setting the MSI-X entry count twice is not allowed */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
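/*
 * Illustrative userspace run loop (not part of this file):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:	...; break;
 *		case KVM_EXIT_MMIO:	...; break;
 *		}
 *	}
 *
 * where mmap_size comes from the KVM_GET_VCPU_MMAP_SIZE ioctl below.
 */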
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
#endif /* KVM_CAP_IRQ_ROUTING */
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs at system
	 * shutdown.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (kvm_iodevice_in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
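/*
 * Illustrative dispatch (a sketch of what callers do, not a quote of
 * the emulation code): an MMIO access is routed via
 *
 *	dev = kvm_io_bus_find_dev(&kvm->mmio_bus, addr, len, is_write);
 *	if (dev)
 *		kvm_iodevice_write(dev, addr, len, val);
 */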
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
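/*
 * The files created above show up under /sys/kernel/debug/kvm/; for
 * example, reading /sys/kernel/debug/kvm/remote_tlb_flush (a
 * KVM_STAT_VM item on x86) sums kvm->stat.remote_tlb_flush across all
 * VMs via vm_stat_get().
 */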
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
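/*
 * Illustrative only: an arch module wires this up from its own
 * module_init(), roughly as VMX does:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 */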
void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);