KVM: opencode gfn_to_page in kvm_vm_fault
[deliverable/linux.git] / virt / kvm / kvm_main.c
6aa8b732
AK
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
e2174021 18#include "iodev.h"
6aa8b732 19
edf88417 20#include <linux/kvm_host.h>
6aa8b732
AK
21#include <linux/kvm.h>
22#include <linux/module.h>
23#include <linux/errno.h>
6aa8b732
AK
24#include <linux/percpu.h>
25#include <linux/gfp.h>
6aa8b732
AK
26#include <linux/mm.h>
27#include <linux/miscdevice.h>
28#include <linux/vmalloc.h>
6aa8b732 29#include <linux/reboot.h>
6aa8b732
AK
30#include <linux/debugfs.h>
31#include <linux/highmem.h>
32#include <linux/file.h>
59ae6c6b 33#include <linux/sysdev.h>
774c47f1 34#include <linux/cpu.h>
e8edc6e0 35#include <linux/sched.h>
d9e368d6
AK
36#include <linux/cpumask.h>
37#include <linux/smp.h>
d6d28168 38#include <linux/anon_inodes.h>
04d2cc77 39#include <linux/profile.h>
7aa81cc0 40#include <linux/kvm_para.h>
6fc138d2 41#include <linux/pagemap.h>
8d4e1288 42#include <linux/mman.h>
35149e21 43#include <linux/swap.h>
62c476c7 44#include <linux/intel-iommu.h>
6aa8b732 45
e495606d 46#include <asm/processor.h>
e495606d
AK
47#include <asm/io.h>
48#include <asm/uaccess.h>
3e021bf5 49#include <asm/pgtable.h>
6aa8b732 50
5f94c174
LV
51#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
52#include "coalesced_mmio.h"
53#endif
54
6aa8b732
AK
55MODULE_AUTHOR("Qumranet");
56MODULE_LICENSE("GPL");
57
e9b11c17
ZX
58DEFINE_SPINLOCK(kvm_lock);
59LIST_HEAD(vm_list);
133de902 60
1b6c0168
AK
61static cpumask_t cpus_hardware_enabled;
62
c16f862d
RR
63struct kmem_cache *kvm_vcpu_cache;
64EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
1165f5fe 65
15ad7146
AK
66static __read_mostly struct preempt_ops kvm_preempt_ops;
67
76f7c879 68struct dentry *kvm_debugfs_dir;
6aa8b732 69
bccf2150
AK
70static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
71 unsigned long arg);
72
4ecac3fd
AK
73bool kvm_rebooting;
74
5aacf0ca
JM
75static inline int valid_vcpu(int n)
76{
77 return likely(n >= 0 && n < KVM_MAX_VCPUS);
78}
79
62c476c7 80inline int is_mmio_pfn(pfn_t pfn)
cbff90a7
BAY
81{
82 if (pfn_valid(pfn))
83 return PageReserved(pfn_to_page(pfn));
84
85 return true;
86}
87
bccf2150
AK
88/*
89 * Switches to specified vcpu, until a matching vcpu_put()
90 */
313a3dc7 91void vcpu_load(struct kvm_vcpu *vcpu)
6aa8b732 92{
15ad7146
AK
93 int cpu;
94
bccf2150 95 mutex_lock(&vcpu->mutex);
15ad7146
AK
96 cpu = get_cpu();
97 preempt_notifier_register(&vcpu->preempt_notifier);
313a3dc7 98 kvm_arch_vcpu_load(vcpu, cpu);
15ad7146 99 put_cpu();
6aa8b732
AK
100}
101
313a3dc7 102void vcpu_put(struct kvm_vcpu *vcpu)
6aa8b732 103{
15ad7146 104 preempt_disable();
313a3dc7 105 kvm_arch_vcpu_put(vcpu);
15ad7146
AK
106 preempt_notifier_unregister(&vcpu->preempt_notifier);
107 preempt_enable();
6aa8b732
AK
108 mutex_unlock(&vcpu->mutex);
109}
110
d9e368d6
AK
111static void ack_flush(void *_completed)
112{
d9e368d6
AK
113}
114
115void kvm_flush_remote_tlbs(struct kvm *kvm)
116{
597a5f55 117 int i, cpu, me;
d9e368d6
AK
118 cpumask_t cpus;
119 struct kvm_vcpu *vcpu;
d9e368d6 120
597a5f55 121 me = get_cpu();
d9e368d6 122 cpus_clear(cpus);
fb3f0f51
RR
123 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
124 vcpu = kvm->vcpus[i];
125 if (!vcpu)
126 continue;
3176bc3e 127 if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
d9e368d6
AK
128 continue;
129 cpu = vcpu->cpu;
597a5f55 130 if (cpu != -1 && cpu != me)
49d3bd7e 131 cpu_set(cpu, cpus);
d9e368d6 132 }
0f74a24c 133 if (cpus_empty(cpus))
597a5f55 134 goto out;
0f74a24c 135 ++kvm->stat.remote_tlb_flush;
49d3bd7e 136 smp_call_function_mask(cpus, ack_flush, NULL, 1);
597a5f55
AK
137out:
138 put_cpu();
d9e368d6
AK
139}
140
2e53d63a
MT
141void kvm_reload_remote_mmus(struct kvm *kvm)
142{
597a5f55 143 int i, cpu, me;
2e53d63a
MT
144 cpumask_t cpus;
145 struct kvm_vcpu *vcpu;
146
597a5f55 147 me = get_cpu();
2e53d63a
MT
148 cpus_clear(cpus);
149 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
150 vcpu = kvm->vcpus[i];
151 if (!vcpu)
152 continue;
153 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
154 continue;
155 cpu = vcpu->cpu;
597a5f55 156 if (cpu != -1 && cpu != me)
2e53d63a
MT
157 cpu_set(cpu, cpus);
158 }
159 if (cpus_empty(cpus))
597a5f55 160 goto out;
2e53d63a 161 smp_call_function_mask(cpus, ack_flush, NULL, 1);
597a5f55
AK
162out:
163 put_cpu();
2e53d63a
MT
164}
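/*
 * Both functions above only post a request bit in vcpu->requests and IPI the
 * physical CPUs currently running those vCPUs; the actual TLB flush or MMU
 * reload happens when each vCPU next processes its request bits on the way
 * back into guest mode.  A minimal sketch of that consumer side, roughly as
 * the x86 run loop of this era does it (kvm_x86_ops->tlb_flush and
 * kvm_mmu_unload are the x86 hooks; other architectures differ):
 *
 *	if (vcpu->requests) {
 *		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 *			kvm_mmu_unload(vcpu);
 *		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *			kvm_x86_ops->tlb_flush(vcpu);
 *	}
 *
 * test_and_set_bit() is used on the sender side so a vCPU that already has a
 * flush pending is not IPI'd a second time.
 */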
165
166
fb3f0f51
RR
167int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
168{
169 struct page *page;
170 int r;
171
172 mutex_init(&vcpu->mutex);
173 vcpu->cpu = -1;
fb3f0f51
RR
174 vcpu->kvm = kvm;
175 vcpu->vcpu_id = id;
b6958ce4 176 init_waitqueue_head(&vcpu->wq);
fb3f0f51
RR
177
178 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
179 if (!page) {
180 r = -ENOMEM;
181 goto fail;
182 }
183 vcpu->run = page_address(page);
184
e9b11c17 185 r = kvm_arch_vcpu_init(vcpu);
fb3f0f51 186 if (r < 0)
e9b11c17 187 goto fail_free_run;
fb3f0f51
RR
188 return 0;
189
fb3f0f51
RR
190fail_free_run:
191 free_page((unsigned long)vcpu->run);
192fail:
76fafa5e 193 return r;
fb3f0f51
RR
194}
195EXPORT_SYMBOL_GPL(kvm_vcpu_init);
196
197void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
198{
e9b11c17 199 kvm_arch_vcpu_uninit(vcpu);
fb3f0f51
RR
200 free_page((unsigned long)vcpu->run);
201}
202EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
203
e930bffe
AA
204#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
205static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
206{
207 return container_of(mn, struct kvm, mmu_notifier);
208}
209
210static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
211 struct mm_struct *mm,
212 unsigned long address)
213{
214 struct kvm *kvm = mmu_notifier_to_kvm(mn);
215 int need_tlb_flush;
216
217 /*
218 * When ->invalidate_page runs, the linux pte has been zapped
219 * already but the page is still allocated until
220 * ->invalidate_page returns. So if we increase the sequence
221 * here the kvm page fault will notice if the spte can't be
222 * established because the page is going to be freed. If
223 * instead the kvm page fault establishes the spte before
224 * ->invalidate_page runs, kvm_unmap_hva will release it
225 * before returning.
226 *
227 * The sequence increase only needs to be seen at spin_unlock
228 * time, and not at spin_lock time.
229 *
230 * Increasing the sequence after the spin_unlock would be
231 * unsafe because the kvm page fault could then establish the
232 * pte after kvm_unmap_hva returned, without noticing the page
233 * is going to be freed.
234 */
235 spin_lock(&kvm->mmu_lock);
236 kvm->mmu_notifier_seq++;
237 need_tlb_flush = kvm_unmap_hva(kvm, address);
238 spin_unlock(&kvm->mmu_lock);
239
240 /* we have to flush the TLB before the pages can be freed */
241 if (need_tlb_flush)
242 kvm_flush_remote_tlbs(kvm);
243
244}
245
246static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
247 struct mm_struct *mm,
248 unsigned long start,
249 unsigned long end)
250{
251 struct kvm *kvm = mmu_notifier_to_kvm(mn);
252 int need_tlb_flush = 0;
253
254 spin_lock(&kvm->mmu_lock);
255 /*
256 * The count increase must become visible at unlock time as no
257 * spte can be established without taking the mmu_lock and
258 * count is also read inside the mmu_lock critical section.
259 */
260 kvm->mmu_notifier_count++;
261 for (; start < end; start += PAGE_SIZE)
262 need_tlb_flush |= kvm_unmap_hva(kvm, start);
263 spin_unlock(&kvm->mmu_lock);
264
265 /* we have to flush the TLB before the pages can be freed */
266 if (need_tlb_flush)
267 kvm_flush_remote_tlbs(kvm);
268}
269
270static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
271 struct mm_struct *mm,
272 unsigned long start,
273 unsigned long end)
274{
275 struct kvm *kvm = mmu_notifier_to_kvm(mn);
276
277 spin_lock(&kvm->mmu_lock);
278 /*
279 * This sequence increase will notify the kvm page fault that
280 * the page that is going to be mapped in the spte could have
281 * been freed.
282 */
283 kvm->mmu_notifier_seq++;
284 /*
285 * The above sequence increase must be visible before the
286 * below count decrease but both values are read by the kvm
287 * page fault under mmu_lock spinlock so we don't need to add
288 * an smp_wmb() here in between the two.
289 */
290 kvm->mmu_notifier_count--;
291 spin_unlock(&kvm->mmu_lock);
292
293 BUG_ON(kvm->mmu_notifier_count < 0);
294}
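/*
 * The comments above describe both halves of the protocol between these
 * notifier callbacks and the KVM page fault: the fault path samples
 * mmu_notifier_seq before resolving the host page, then rechecks seq and
 * count under mmu_lock before installing the spte.  A minimal sketch of that
 * consumer-side check, roughly as the arch fault code of this era performs
 * it (treat the exact structure as illustrative):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		   <- may sleep and fault in the page
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq) {
 *		spin_unlock(&kvm->mmu_lock);	   <- invalidate ran or is running
 *		kvm_release_pfn_clean(pfn);
 *		goto retry;
 *	}
 *	... install the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */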
295
296static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
297 struct mm_struct *mm,
298 unsigned long address)
299{
300 struct kvm *kvm = mmu_notifier_to_kvm(mn);
301 int young;
302
303 spin_lock(&kvm->mmu_lock);
304 young = kvm_age_hva(kvm, address);
305 spin_unlock(&kvm->mmu_lock);
306
307 if (young)
308 kvm_flush_remote_tlbs(kvm);
309
310 return young;
311}
312
313static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
314 .invalidate_page = kvm_mmu_notifier_invalidate_page,
315 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
316 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
317 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
318};
319#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
320
f17abe9a 321static struct kvm *kvm_create_vm(void)
6aa8b732 322{
d19a9cd2 323 struct kvm *kvm = kvm_arch_create_vm();
5f94c174
LV
324#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
325 struct page *page;
326#endif
6aa8b732 327
d19a9cd2
ZX
328 if (IS_ERR(kvm))
329 goto out;
6aa8b732 330
5f94c174
LV
331#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
332 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
333 if (!page) {
334 kfree(kvm);
335 return ERR_PTR(-ENOMEM);
336 }
337 kvm->coalesced_mmio_ring =
338 (struct kvm_coalesced_mmio_ring *)page_address(page);
339#endif
340
e930bffe
AA
341#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
342 {
343 int err;
344 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
345 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
346 if (err) {
347#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
348 put_page(page);
349#endif
350 kfree(kvm);
351 return ERR_PTR(err);
352 }
353 }
354#endif
355
6d4e4c4f
AK
356 kvm->mm = current->mm;
357 atomic_inc(&kvm->mm->mm_count);
aaee2c94 358 spin_lock_init(&kvm->mmu_lock);
74906345 359 kvm_io_bus_init(&kvm->pio_bus);
11ec2804 360 mutex_init(&kvm->lock);
2eeb2e94 361 kvm_io_bus_init(&kvm->mmio_bus);
72dc67a6 362 init_rwsem(&kvm->slots_lock);
d39f13b0 363 atomic_set(&kvm->users_count, 1);
5e58cfe4
RR
364 spin_lock(&kvm_lock);
365 list_add(&kvm->vm_list, &vm_list);
366 spin_unlock(&kvm_lock);
5f94c174
LV
367#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
368 kvm_coalesced_mmio_init(kvm);
369#endif
d19a9cd2 370out:
f17abe9a
AK
371 return kvm;
372}
373
6aa8b732
AK
374/*
375 * Free any memory in @free but not in @dont.
376 */
377static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
378 struct kvm_memory_slot *dont)
379{
290fc38d
IE
380 if (!dont || free->rmap != dont->rmap)
381 vfree(free->rmap);
6aa8b732
AK
382
383 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
384 vfree(free->dirty_bitmap);
385
05da4558
MT
386 if (!dont || free->lpage_info != dont->lpage_info)
387 vfree(free->lpage_info);
388
6aa8b732 389 free->npages = 0;
8b6d44c7 390 free->dirty_bitmap = NULL;
8d4e1288 391 free->rmap = NULL;
05da4558 392 free->lpage_info = NULL;
6aa8b732
AK
393}
394
d19a9cd2 395void kvm_free_physmem(struct kvm *kvm)
6aa8b732
AK
396{
397 int i;
398
399 for (i = 0; i < kvm->nmemslots; ++i)
8b6d44c7 400 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
6aa8b732
AK
401}
402
f17abe9a
AK
403static void kvm_destroy_vm(struct kvm *kvm)
404{
6d4e4c4f
AK
405 struct mm_struct *mm = kvm->mm;
406
133de902
AK
407 spin_lock(&kvm_lock);
408 list_del(&kvm->vm_list);
409 spin_unlock(&kvm_lock);
74906345 410 kvm_io_bus_destroy(&kvm->pio_bus);
2eeb2e94 411 kvm_io_bus_destroy(&kvm->mmio_bus);
5f94c174
LV
412#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
413 if (kvm->coalesced_mmio_ring != NULL)
414 free_page((unsigned long)kvm->coalesced_mmio_ring);
e930bffe
AA
415#endif
416#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
417 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
5f94c174 418#endif
d19a9cd2 419 kvm_arch_destroy_vm(kvm);
6d4e4c4f 420 mmdrop(mm);
f17abe9a
AK
421}
422
d39f13b0
IE
423void kvm_get_kvm(struct kvm *kvm)
424{
425 atomic_inc(&kvm->users_count);
426}
427EXPORT_SYMBOL_GPL(kvm_get_kvm);
428
429void kvm_put_kvm(struct kvm *kvm)
430{
431 if (atomic_dec_and_test(&kvm->users_count))
432 kvm_destroy_vm(kvm);
433}
434EXPORT_SYMBOL_GPL(kvm_put_kvm);
435
436
f17abe9a
AK
437static int kvm_vm_release(struct inode *inode, struct file *filp)
438{
439 struct kvm *kvm = filp->private_data;
440
d39f13b0 441 kvm_put_kvm(kvm);
6aa8b732
AK
442 return 0;
443}
444
6aa8b732
AK
445/*
446 * Allocate some memory and give it an address in the guest physical address
447 * space.
448 *
449 * Discontiguous memory is allowed, mostly for framebuffers.
f78e0e2e 450 *
10589a46 451 * Must be called holding mmap_sem for write.
6aa8b732 452 */
f78e0e2e
SY
453int __kvm_set_memory_region(struct kvm *kvm,
454 struct kvm_userspace_memory_region *mem,
455 int user_alloc)
6aa8b732
AK
456{
457 int r;
458 gfn_t base_gfn;
459 unsigned long npages;
460 unsigned long i;
461 struct kvm_memory_slot *memslot;
462 struct kvm_memory_slot old, new;
6aa8b732
AK
463
464 r = -EINVAL;
465 /* General sanity checks */
466 if (mem->memory_size & (PAGE_SIZE - 1))
467 goto out;
468 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
469 goto out;
e0d62c7f 470 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
6aa8b732
AK
471 goto out;
472 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
473 goto out;
474
475 memslot = &kvm->memslots[mem->slot];
476 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
477 npages = mem->memory_size >> PAGE_SHIFT;
478
479 if (!npages)
480 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
481
6aa8b732
AK
482 new = old = *memslot;
483
484 new.base_gfn = base_gfn;
485 new.npages = npages;
486 new.flags = mem->flags;
487
488 /* Disallow changing a memory slot's size. */
489 r = -EINVAL;
490 if (npages && old.npages && npages != old.npages)
f78e0e2e 491 goto out_free;
6aa8b732
AK
492
493 /* Check for overlaps */
494 r = -EEXIST;
495 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
496 struct kvm_memory_slot *s = &kvm->memslots[i];
497
498 if (s == memslot)
499 continue;
500 if (!((base_gfn + npages <= s->base_gfn) ||
501 (base_gfn >= s->base_gfn + s->npages)))
f78e0e2e 502 goto out_free;
6aa8b732 503 }
6aa8b732 504
6aa8b732
AK
505 /* Free page dirty bitmap if unneeded */
506 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
8b6d44c7 507 new.dirty_bitmap = NULL;
6aa8b732
AK
508
509 r = -ENOMEM;
510
511 /* Allocate if a slot is being created */
eff0114a 512#ifndef CONFIG_S390
8d4e1288 513 if (npages && !new.rmap) {
d77c26fc 514 new.rmap = vmalloc(npages * sizeof(struct page *));
290fc38d
IE
515
516 if (!new.rmap)
f78e0e2e 517 goto out_free;
290fc38d 518
290fc38d 519 memset(new.rmap, 0, npages * sizeof(*new.rmap));
8d4e1288 520
80b14b5b 521 new.user_alloc = user_alloc;
604b38ac
AA
522 /*
523 * hva_to_rmmap() serializes with the mmu_lock and to be
524 * safe it has to ignore memslots with !user_alloc &&
525 * !userspace_addr.
526 */
527 if (user_alloc)
528 new.userspace_addr = mem->userspace_addr;
529 else
530 new.userspace_addr = 0;
6aa8b732 531 }
05da4558
MT
532 if (npages && !new.lpage_info) {
533 int largepages = npages / KVM_PAGES_PER_HPAGE;
534 if (npages % KVM_PAGES_PER_HPAGE)
535 largepages++;
536 if (base_gfn % KVM_PAGES_PER_HPAGE)
537 largepages++;
538
539 new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
540
541 if (!new.lpage_info)
542 goto out_free;
543
544 memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
545
546 if (base_gfn % KVM_PAGES_PER_HPAGE)
547 new.lpage_info[0].write_count = 1;
548 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
549 new.lpage_info[largepages-1].write_count = 1;
550 }
6aa8b732
AK
551
552 /* Allocate page dirty bitmap if needed */
553 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
554 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
555
556 new.dirty_bitmap = vmalloc(dirty_bytes);
557 if (!new.dirty_bitmap)
f78e0e2e 558 goto out_free;
6aa8b732
AK
559 memset(new.dirty_bitmap, 0, dirty_bytes);
560 }
eff0114a 561#endif /* not defined CONFIG_S390 */
6aa8b732 562
34d4cb8f
MT
563 if (!npages)
564 kvm_arch_flush_shadow(kvm);
565
604b38ac
AA
566 spin_lock(&kvm->mmu_lock);
567 if (mem->slot >= kvm->nmemslots)
568 kvm->nmemslots = mem->slot + 1;
569
3ad82a7e 570 *memslot = new;
604b38ac 571 spin_unlock(&kvm->mmu_lock);
3ad82a7e 572
0de10343
ZX
573 r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
574 if (r) {
604b38ac 575 spin_lock(&kvm->mmu_lock);
0de10343 576 *memslot = old;
604b38ac 577 spin_unlock(&kvm->mmu_lock);
0de10343 578 goto out_free;
82ce2c96
IE
579 }
580
6aa8b732 581 kvm_free_physmem_slot(&old, &new);
62c476c7
BAY
582
583 /* map the pages in iommu page table */
584 r = kvm_iommu_map_pages(kvm, base_gfn, npages);
585 if (r)
586 goto out;
587
6aa8b732
AK
588 return 0;
589
f78e0e2e 590out_free:
6aa8b732
AK
591 kvm_free_physmem_slot(&new, &old);
592out:
593 return r;
210c7c4d
IE
594
595}
f78e0e2e
SY
596EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
597
598int kvm_set_memory_region(struct kvm *kvm,
599 struct kvm_userspace_memory_region *mem,
600 int user_alloc)
601{
602 int r;
603
72dc67a6 604 down_write(&kvm->slots_lock);
f78e0e2e 605 r = __kvm_set_memory_region(kvm, mem, user_alloc);
72dc67a6 606 up_write(&kvm->slots_lock);
f78e0e2e
SY
607 return r;
608}
210c7c4d
IE
609EXPORT_SYMBOL_GPL(kvm_set_memory_region);
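/*
 * Userspace reaches this path through the KVM_SET_USER_MEMORY_REGION ioctl
 * on a VM fd; the kernel does not allocate guest RAM itself, it only binds
 * the slot to memory the caller already owns.  A minimal userspace sketch,
 * assuming vm_fd came from KVM_CREATE_VM, mem_size is page aligned and
 * flags is either 0 or KVM_MEM_LOG_DIRTY_PAGES:
 *
 *	void *ram = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size     = mem_size,
 *		.userspace_addr  = (__u64)(unsigned long)ram,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 */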
610
1fe779f8
CO
611int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
612 struct
613 kvm_userspace_memory_region *mem,
614 int user_alloc)
210c7c4d 615{
e0d62c7f
IE
616 if (mem->slot >= KVM_MEMORY_SLOTS)
617 return -EINVAL;
210c7c4d 618 return kvm_set_memory_region(kvm, mem, user_alloc);
6aa8b732
AK
619}
620
5bb064dc
ZX
621int kvm_get_dirty_log(struct kvm *kvm,
622 struct kvm_dirty_log *log, int *is_dirty)
6aa8b732
AK
623{
624 struct kvm_memory_slot *memslot;
625 int r, i;
626 int n;
627 unsigned long any = 0;
628
6aa8b732
AK
629 r = -EINVAL;
630 if (log->slot >= KVM_MEMORY_SLOTS)
631 goto out;
632
633 memslot = &kvm->memslots[log->slot];
634 r = -ENOENT;
635 if (!memslot->dirty_bitmap)
636 goto out;
637
cd1a4a98 638 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
6aa8b732 639
cd1a4a98 640 for (i = 0; !any && i < n/sizeof(long); ++i)
6aa8b732
AK
641 any = memslot->dirty_bitmap[i];
642
643 r = -EFAULT;
644 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
645 goto out;
646
5bb064dc
ZX
647 if (any)
648 *is_dirty = 1;
6aa8b732
AK
649
650 r = 0;
6aa8b732 651out:
6aa8b732
AK
652 return r;
653}
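/*
 * kvm_get_dirty_log() copies a slot's dirty bitmap out to user memory; the
 * per-arch wrapper behind the KVM_GET_DIRTY_LOG ioctl additionally clears
 * the bitmap and removes write access from the shadow page tables.  A
 * minimal userspace sketch, assuming the slot was created with
 * KVM_MEM_LOG_DIRTY_PAGES and covers npages guest pages:
 *
 *	unsigned long bitmap[(npages + 8 * sizeof(long) - 1) / (8 * sizeof(long))];
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		... bit N set means guest page base_gfn + N was written ...
 */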
654
cea7bb21
IE
655int is_error_page(struct page *page)
656{
657 return page == bad_page;
658}
659EXPORT_SYMBOL_GPL(is_error_page);
660
35149e21
AL
661int is_error_pfn(pfn_t pfn)
662{
663 return pfn == bad_pfn;
664}
665EXPORT_SYMBOL_GPL(is_error_pfn);
666
f9d46eb0
IE
667static inline unsigned long bad_hva(void)
668{
669 return PAGE_OFFSET;
670}
671
672int kvm_is_error_hva(unsigned long addr)
673{
674 return addr == bad_hva();
675}
676EXPORT_SYMBOL_GPL(kvm_is_error_hva);
677
e8207547 678static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
6aa8b732
AK
679{
680 int i;
681
682 for (i = 0; i < kvm->nmemslots; ++i) {
683 struct kvm_memory_slot *memslot = &kvm->memslots[i];
684
685 if (gfn >= memslot->base_gfn
686 && gfn < memslot->base_gfn + memslot->npages)
687 return memslot;
688 }
8b6d44c7 689 return NULL;
6aa8b732 690}
e8207547
AK
691
692struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
693{
694 gfn = unalias_gfn(kvm, gfn);
695 return __gfn_to_memslot(kvm, gfn);
696}
6aa8b732 697
e0d62c7f
IE
698int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
699{
700 int i;
701
702 gfn = unalias_gfn(kvm, gfn);
703 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
704 struct kvm_memory_slot *memslot = &kvm->memslots[i];
705
706 if (gfn >= memslot->base_gfn
707 && gfn < memslot->base_gfn + memslot->npages)
708 return 1;
709 }
710 return 0;
711}
712EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
713
05da4558 714unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
539cb660
IE
715{
716 struct kvm_memory_slot *slot;
717
718 gfn = unalias_gfn(kvm, gfn);
719 slot = __gfn_to_memslot(kvm, gfn);
720 if (!slot)
721 return bad_hva();
722 return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
723}
0d150298 724EXPORT_SYMBOL_GPL(gfn_to_hva);
539cb660 725
aab61cc0
AL
726/*
727 * Requires current->mm->mmap_sem to be held
728 */
35149e21 729pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
954bbbc2 730{
8d4e1288 731 struct page *page[1];
539cb660 732 unsigned long addr;
8d4e1288 733 int npages;
2e2e3738 734 pfn_t pfn;
954bbbc2 735
60395224
AK
736 might_sleep();
737
539cb660
IE
738 addr = gfn_to_hva(kvm, gfn);
739 if (kvm_is_error_hva(addr)) {
8a7ae055 740 get_page(bad_page);
35149e21 741 return page_to_pfn(bad_page);
8a7ae055 742 }
8d4e1288 743
d657c733 744 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
539cb660
IE
745 NULL);
746
2e2e3738
AL
747 if (unlikely(npages != 1)) {
748 struct vm_area_struct *vma;
749
750 vma = find_vma(current->mm, addr);
751 if (vma == NULL || addr < vma->vm_start ||
752 !(vma->vm_flags & VM_PFNMAP)) {
753 get_page(bad_page);
754 return page_to_pfn(bad_page);
755 }
756
757 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
cbff90a7 758 BUG_ON(!is_mmio_pfn(pfn));
2e2e3738
AL
759 } else
760 pfn = page_to_pfn(page[0]);
8d4e1288 761
2e2e3738 762 return pfn;
35149e21
AL
763}
764
765EXPORT_SYMBOL_GPL(gfn_to_pfn);
766
767struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
768{
2e2e3738
AL
769 pfn_t pfn;
770
771 pfn = gfn_to_pfn(kvm, gfn);
cbff90a7 772 if (!is_mmio_pfn(pfn))
2e2e3738
AL
773 return pfn_to_page(pfn);
774
cbff90a7 775 WARN_ON(is_mmio_pfn(pfn));
2e2e3738
AL
776
777 get_page(bad_page);
778 return bad_page;
954bbbc2 779}
aab61cc0 780
954bbbc2
AK
781EXPORT_SYMBOL_GPL(gfn_to_page);
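/*
 * gfn_to_pfn()/gfn_to_page() take a reference on the backing page via
 * get_user_pages(), so callers inherit two obligations from the comment
 * above gfn_to_pfn(): hold current->mm->mmap_sem across the call and drop
 * the reference when done.  A sketch of a typical in-kernel caller
 * (illustrative only):
 *
 *	down_read(&current->mm->mmap_sem);
 *	page = gfn_to_page(kvm, gfn);
 *	up_read(&current->mm->mmap_sem);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return -EFAULT;
 *	}
 *	... access the page, e.g. via kmap(page) ...
 *	kvm_release_page_dirty(page);	   <- or _clean() if it was not written
 */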
782
b4231d61
IE
783void kvm_release_page_clean(struct page *page)
784{
35149e21 785 kvm_release_pfn_clean(page_to_pfn(page));
b4231d61
IE
786}
787EXPORT_SYMBOL_GPL(kvm_release_page_clean);
788
35149e21
AL
789void kvm_release_pfn_clean(pfn_t pfn)
790{
cbff90a7 791 if (!is_mmio_pfn(pfn))
2e2e3738 792 put_page(pfn_to_page(pfn));
35149e21
AL
793}
794EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
795
b4231d61 796void kvm_release_page_dirty(struct page *page)
8a7ae055 797{
35149e21
AL
798 kvm_release_pfn_dirty(page_to_pfn(page));
799}
800EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
801
802void kvm_release_pfn_dirty(pfn_t pfn)
803{
804 kvm_set_pfn_dirty(pfn);
805 kvm_release_pfn_clean(pfn);
806}
807EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
808
809void kvm_set_page_dirty(struct page *page)
810{
811 kvm_set_pfn_dirty(page_to_pfn(page));
812}
813EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
814
815void kvm_set_pfn_dirty(pfn_t pfn)
816{
cbff90a7 817 if (!is_mmio_pfn(pfn)) {
2e2e3738
AL
818 struct page *page = pfn_to_page(pfn);
819 if (!PageReserved(page))
820 SetPageDirty(page);
821 }
8a7ae055 822}
35149e21
AL
823EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
824
825void kvm_set_pfn_accessed(pfn_t pfn)
826{
cbff90a7 827 if (!is_mmio_pfn(pfn))
2e2e3738 828 mark_page_accessed(pfn_to_page(pfn));
35149e21
AL
829}
830EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
831
832void kvm_get_pfn(pfn_t pfn)
833{
cbff90a7 834 if (!is_mmio_pfn(pfn))
2e2e3738 835 get_page(pfn_to_page(pfn));
35149e21
AL
836}
837EXPORT_SYMBOL_GPL(kvm_get_pfn);
8a7ae055 838
195aefde
IE
839static int next_segment(unsigned long len, int offset)
840{
841 if (len > PAGE_SIZE - offset)
842 return PAGE_SIZE - offset;
843 else
844 return len;
845}
846
847int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
848 int len)
849{
e0506bcb
IE
850 int r;
851 unsigned long addr;
195aefde 852
e0506bcb
IE
853 addr = gfn_to_hva(kvm, gfn);
854 if (kvm_is_error_hva(addr))
855 return -EFAULT;
856 r = copy_from_user(data, (void __user *)addr + offset, len);
857 if (r)
195aefde 858 return -EFAULT;
195aefde
IE
859 return 0;
860}
861EXPORT_SYMBOL_GPL(kvm_read_guest_page);
862
863int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
864{
865 gfn_t gfn = gpa >> PAGE_SHIFT;
866 int seg;
867 int offset = offset_in_page(gpa);
868 int ret;
869
870 while ((seg = next_segment(len, offset)) != 0) {
871 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
872 if (ret < 0)
873 return ret;
874 offset = 0;
875 len -= seg;
876 data += seg;
877 ++gfn;
878 }
879 return 0;
880}
881EXPORT_SYMBOL_GPL(kvm_read_guest);
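/*
 * kvm_read_guest() hides the fact that a guest-physical range may cross
 * page and memslot boundaries: next_segment() caps each chunk at the end of
 * the current page and kvm_read_guest_page() performs one copy_from_user()
 * per chunk.  For example, a 10-byte read at gpa 0x1ffc becomes a 4-byte
 * copy from gfn 1 at offset 0xffc followed by a 6-byte copy from gfn 2 at
 * offset 0.  A typical caller just checks the combined result:
 *
 *	u32 val;
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;	   <- some part of the range has no memslot
 */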
882
7ec54588
MT
883int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
884 unsigned long len)
885{
886 int r;
887 unsigned long addr;
888 gfn_t gfn = gpa >> PAGE_SHIFT;
889 int offset = offset_in_page(gpa);
890
891 addr = gfn_to_hva(kvm, gfn);
892 if (kvm_is_error_hva(addr))
893 return -EFAULT;
0aac03f0 894 pagefault_disable();
7ec54588 895 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
0aac03f0 896 pagefault_enable();
7ec54588
MT
897 if (r)
898 return -EFAULT;
899 return 0;
900}
901EXPORT_SYMBOL(kvm_read_guest_atomic);
902
195aefde
IE
903int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
904 int offset, int len)
905{
e0506bcb
IE
906 int r;
907 unsigned long addr;
195aefde 908
e0506bcb
IE
909 addr = gfn_to_hva(kvm, gfn);
910 if (kvm_is_error_hva(addr))
911 return -EFAULT;
912 r = copy_to_user((void __user *)addr + offset, data, len);
913 if (r)
195aefde 914 return -EFAULT;
195aefde
IE
915 mark_page_dirty(kvm, gfn);
916 return 0;
917}
918EXPORT_SYMBOL_GPL(kvm_write_guest_page);
919
920int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
921 unsigned long len)
922{
923 gfn_t gfn = gpa >> PAGE_SHIFT;
924 int seg;
925 int offset = offset_in_page(gpa);
926 int ret;
927
928 while ((seg = next_segment(len, offset)) != 0) {
929 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
930 if (ret < 0)
931 return ret;
932 offset = 0;
933 len -= seg;
934 data += seg;
935 ++gfn;
936 }
937 return 0;
938}
939
940int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
941{
3e021bf5 942 return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
195aefde
IE
943}
944EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
945
946int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
947{
948 gfn_t gfn = gpa >> PAGE_SHIFT;
949 int seg;
950 int offset = offset_in_page(gpa);
951 int ret;
952
953 while ((seg = next_segment(len, offset)) != 0) {
954 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
955 if (ret < 0)
956 return ret;
957 offset = 0;
958 len -= seg;
959 ++gfn;
960 }
961 return 0;
962}
963EXPORT_SYMBOL_GPL(kvm_clear_guest);
964
6aa8b732
AK
965void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
966{
31389947 967 struct kvm_memory_slot *memslot;
6aa8b732 968
3b6fff19 969 gfn = unalias_gfn(kvm, gfn);
7e9d619d
RR
970 memslot = __gfn_to_memslot(kvm, gfn);
971 if (memslot && memslot->dirty_bitmap) {
972 unsigned long rel_gfn = gfn - memslot->base_gfn;
6aa8b732 973
7e9d619d
RR
974 /* avoid RMW */
975 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
976 set_bit(rel_gfn, memslot->dirty_bitmap);
6aa8b732
AK
977 }
978}
979
b6958ce4
ED
980/*
981 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
982 */
8776e519 983void kvm_vcpu_block(struct kvm_vcpu *vcpu)
d3bef15f 984{
e5c239cf
MT
985 DEFINE_WAIT(wait);
986
987 for (;;) {
988 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
989
d7690175
MT
990 if (kvm_cpu_has_interrupt(vcpu) ||
991 kvm_cpu_has_pending_timer(vcpu) ||
992 kvm_arch_vcpu_runnable(vcpu)) {
993 set_bit(KVM_REQ_UNHALT, &vcpu->requests);
e5c239cf 994 break;
d7690175 995 }
e5c239cf
MT
996 if (signal_pending(current))
997 break;
998
b6958ce4
ED
999 vcpu_put(vcpu);
1000 schedule();
1001 vcpu_load(vcpu);
1002 }
d3bef15f 1003
e5c239cf 1004 finish_wait(&vcpu->wq, &wait);
b6958ce4
ED
1005}
1006
6aa8b732
AK
1007void kvm_resched(struct kvm_vcpu *vcpu)
1008{
3fca0365
YD
1009 if (!need_resched())
1010 return;
6aa8b732 1011 cond_resched();
6aa8b732
AK
1012}
1013EXPORT_SYMBOL_GPL(kvm_resched);
1014
e4a533a4 1015static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9a2bb7f4
AK
1016{
1017 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
9a2bb7f4
AK
1018 struct page *page;
1019
e4a533a4 1020 if (vmf->pgoff == 0)
039576c0 1021 page = virt_to_page(vcpu->run);
09566765 1022#ifdef CONFIG_X86
e4a533a4 1023 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
ad312c7c 1024 page = virt_to_page(vcpu->arch.pio_data);
5f94c174
LV
1025#endif
1026#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1027 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1028 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
09566765 1029#endif
039576c0 1030 else
e4a533a4 1031 return VM_FAULT_SIGBUS;
9a2bb7f4 1032 get_page(page);
e4a533a4 1033 vmf->page = page;
1034 return 0;
9a2bb7f4
AK
1035}
1036
1037static struct vm_operations_struct kvm_vcpu_vm_ops = {
e4a533a4 1038 .fault = kvm_vcpu_fault,
9a2bb7f4
AK
1039};
1040
1041static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1042{
1043 vma->vm_ops = &kvm_vcpu_vm_ops;
1044 return 0;
1045}
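/*
 * kvm_vcpu_fault() backs mmap() of a vcpu fd: page 0 is the shared
 * struct kvm_run, on x86 the next page is the PIO data page, and the
 * coalesced-MMIO ring follows when it is configured in.  Userspace maps the
 * whole region at once, sized by KVM_GET_VCPU_MMAP_SIZE; a minimal sketch:
 *
 *	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */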
1046
bccf2150
AK
1047static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1048{
1049 struct kvm_vcpu *vcpu = filp->private_data;
1050
66c0b394 1051 kvm_put_kvm(vcpu->kvm);
bccf2150
AK
1052 return 0;
1053}
1054
5c502742 1055static const struct file_operations kvm_vcpu_fops = {
bccf2150
AK
1056 .release = kvm_vcpu_release,
1057 .unlocked_ioctl = kvm_vcpu_ioctl,
1058 .compat_ioctl = kvm_vcpu_ioctl,
9a2bb7f4 1059 .mmap = kvm_vcpu_mmap,
bccf2150
AK
1060};
1061
1062/*
1063 * Allocates an inode for the vcpu.
1064 */
1065static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1066{
7d9dbca3 1067 int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
2030a42c 1068 if (fd < 0)
66c0b394 1069 kvm_put_kvm(vcpu->kvm);
bccf2150 1070 return fd;
bccf2150
AK
1071}
1072
c5ea7660
AK
1073/*
1074 * Creates some virtual cpus. Good luck creating more than one.
1075 */
1076static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1077{
1078 int r;
1079 struct kvm_vcpu *vcpu;
1080
c5ea7660 1081 if (!valid_vcpu(n))
fb3f0f51 1082 return -EINVAL;
c5ea7660 1083
e9b11c17 1084 vcpu = kvm_arch_vcpu_create(kvm, n);
fb3f0f51
RR
1085 if (IS_ERR(vcpu))
1086 return PTR_ERR(vcpu);
c5ea7660 1087
15ad7146
AK
1088 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1089
26e5215f
AK
1090 r = kvm_arch_vcpu_setup(vcpu);
1091 if (r)
1092 goto vcpu_destroy;
1093
11ec2804 1094 mutex_lock(&kvm->lock);
fb3f0f51
RR
1095 if (kvm->vcpus[n]) {
1096 r = -EEXIST;
11ec2804 1097 mutex_unlock(&kvm->lock);
e9b11c17 1098 goto vcpu_destroy;
fb3f0f51
RR
1099 }
1100 kvm->vcpus[n] = vcpu;
11ec2804 1101 mutex_unlock(&kvm->lock);
c5ea7660 1102
fb3f0f51 1103 /* Now it's all set up, let userspace reach it */
66c0b394 1104 kvm_get_kvm(kvm);
bccf2150
AK
1105 r = create_vcpu_fd(vcpu);
1106 if (r < 0)
fb3f0f51
RR
1107 goto unlink;
1108 return r;
39c3b86e 1109
fb3f0f51 1110unlink:
11ec2804 1111 mutex_lock(&kvm->lock);
fb3f0f51 1112 kvm->vcpus[n] = NULL;
11ec2804 1113 mutex_unlock(&kvm->lock);
e9b11c17 1114vcpu_destroy:
d40ccc62 1115 kvm_arch_vcpu_destroy(vcpu);
c5ea7660
AK
1116 return r;
1117}
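/*
 * From userspace this is the KVM_CREATE_VCPU ioctl on the VM fd; the
 * returned vcpu fd is then driven with KVM_RUN, with exits reported through
 * the kvm_run page mapped as shown above.  A minimal sketch (error handling
 * omitted, exit reasons abbreviated):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_HLT:
 *			return 0;
 *		case KVM_EXIT_IO:
 *			... emulate the port access described in *run ...
 *			break;
 *		}
 *	}
 */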
1118
1961d276
AK
1119static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1120{
1121 if (sigset) {
1122 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1123 vcpu->sigset_active = 1;
1124 vcpu->sigset = *sigset;
1125 } else
1126 vcpu->sigset_active = 0;
1127 return 0;
1128}
1129
bccf2150
AK
1130static long kvm_vcpu_ioctl(struct file *filp,
1131 unsigned int ioctl, unsigned long arg)
6aa8b732 1132{
bccf2150 1133 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 1134 void __user *argp = (void __user *)arg;
313a3dc7 1135 int r;
fa3795a7
DH
1136 struct kvm_fpu *fpu = NULL;
1137 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 1138
6d4e4c4f
AK
1139 if (vcpu->kvm->mm != current->mm)
1140 return -EIO;
6aa8b732 1141 switch (ioctl) {
9a2bb7f4 1142 case KVM_RUN:
f0fe5108
AK
1143 r = -EINVAL;
1144 if (arg)
1145 goto out;
b6c7a5dc 1146 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
6aa8b732 1147 break;
6aa8b732 1148 case KVM_GET_REGS: {
3e4bb3ac 1149 struct kvm_regs *kvm_regs;
6aa8b732 1150
3e4bb3ac
XZ
1151 r = -ENOMEM;
1152 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1153 if (!kvm_regs)
6aa8b732 1154 goto out;
3e4bb3ac
XZ
1155 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1156 if (r)
1157 goto out_free1;
6aa8b732 1158 r = -EFAULT;
3e4bb3ac
XZ
1159 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1160 goto out_free1;
6aa8b732 1161 r = 0;
3e4bb3ac
XZ
1162out_free1:
1163 kfree(kvm_regs);
6aa8b732
AK
1164 break;
1165 }
1166 case KVM_SET_REGS: {
3e4bb3ac 1167 struct kvm_regs *kvm_regs;
6aa8b732 1168
3e4bb3ac
XZ
1169 r = -ENOMEM;
1170 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1171 if (!kvm_regs)
6aa8b732 1172 goto out;
3e4bb3ac
XZ
1173 r = -EFAULT;
1174 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1175 goto out_free2;
1176 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
6aa8b732 1177 if (r)
3e4bb3ac 1178 goto out_free2;
6aa8b732 1179 r = 0;
3e4bb3ac
XZ
1180out_free2:
1181 kfree(kvm_regs);
6aa8b732
AK
1182 break;
1183 }
1184 case KVM_GET_SREGS: {
fa3795a7
DH
1185 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1186 r = -ENOMEM;
1187 if (!kvm_sregs)
1188 goto out;
1189 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
6aa8b732
AK
1190 if (r)
1191 goto out;
1192 r = -EFAULT;
fa3795a7 1193 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
6aa8b732
AK
1194 goto out;
1195 r = 0;
1196 break;
1197 }
1198 case KVM_SET_SREGS: {
fa3795a7
DH
1199 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1200 r = -ENOMEM;
1201 if (!kvm_sregs)
1202 goto out;
6aa8b732 1203 r = -EFAULT;
fa3795a7 1204 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
6aa8b732 1205 goto out;
fa3795a7 1206 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
6aa8b732
AK
1207 if (r)
1208 goto out;
1209 r = 0;
1210 break;
1211 }
62d9f0db
MT
1212 case KVM_GET_MP_STATE: {
1213 struct kvm_mp_state mp_state;
1214
1215 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1216 if (r)
1217 goto out;
1218 r = -EFAULT;
1219 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1220 goto out;
1221 r = 0;
1222 break;
1223 }
1224 case KVM_SET_MP_STATE: {
1225 struct kvm_mp_state mp_state;
1226
1227 r = -EFAULT;
1228 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1229 goto out;
1230 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1231 if (r)
1232 goto out;
1233 r = 0;
1234 break;
1235 }
6aa8b732
AK
1236 case KVM_TRANSLATE: {
1237 struct kvm_translation tr;
1238
1239 r = -EFAULT;
2f366987 1240 if (copy_from_user(&tr, argp, sizeof tr))
6aa8b732 1241 goto out;
8b006791 1242 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
1243 if (r)
1244 goto out;
1245 r = -EFAULT;
2f366987 1246 if (copy_to_user(argp, &tr, sizeof tr))
6aa8b732
AK
1247 goto out;
1248 r = 0;
1249 break;
1250 }
6aa8b732
AK
1251 case KVM_DEBUG_GUEST: {
1252 struct kvm_debug_guest dbg;
1253
1254 r = -EFAULT;
2f366987 1255 if (copy_from_user(&dbg, argp, sizeof dbg))
6aa8b732 1256 goto out;
b6c7a5dc 1257 r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
6aa8b732
AK
1258 if (r)
1259 goto out;
1260 r = 0;
1261 break;
1262 }
1961d276
AK
1263 case KVM_SET_SIGNAL_MASK: {
1264 struct kvm_signal_mask __user *sigmask_arg = argp;
1265 struct kvm_signal_mask kvm_sigmask;
1266 sigset_t sigset, *p;
1267
1268 p = NULL;
1269 if (argp) {
1270 r = -EFAULT;
1271 if (copy_from_user(&kvm_sigmask, argp,
1272 sizeof kvm_sigmask))
1273 goto out;
1274 r = -EINVAL;
1275 if (kvm_sigmask.len != sizeof sigset)
1276 goto out;
1277 r = -EFAULT;
1278 if (copy_from_user(&sigset, sigmask_arg->sigset,
1279 sizeof sigset))
1280 goto out;
1281 p = &sigset;
1282 }
1283 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1284 break;
1285 }
b8836737 1286 case KVM_GET_FPU: {
fa3795a7
DH
1287 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1288 r = -ENOMEM;
1289 if (!fpu)
1290 goto out;
1291 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
b8836737
AK
1292 if (r)
1293 goto out;
1294 r = -EFAULT;
fa3795a7 1295 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
b8836737
AK
1296 goto out;
1297 r = 0;
1298 break;
1299 }
1300 case KVM_SET_FPU: {
fa3795a7
DH
1301 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1302 r = -ENOMEM;
1303 if (!fpu)
1304 goto out;
b8836737 1305 r = -EFAULT;
fa3795a7 1306 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
b8836737 1307 goto out;
fa3795a7 1308 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
b8836737
AK
1309 if (r)
1310 goto out;
1311 r = 0;
1312 break;
1313 }
bccf2150 1314 default:
313a3dc7 1315 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
1316 }
1317out:
fa3795a7
DH
1318 kfree(fpu);
1319 kfree(kvm_sregs);
bccf2150
AK
1320 return r;
1321}
1322
1323static long kvm_vm_ioctl(struct file *filp,
1324 unsigned int ioctl, unsigned long arg)
1325{
1326 struct kvm *kvm = filp->private_data;
1327 void __user *argp = (void __user *)arg;
1fe779f8 1328 int r;
bccf2150 1329
6d4e4c4f
AK
1330 if (kvm->mm != current->mm)
1331 return -EIO;
bccf2150
AK
1332 switch (ioctl) {
1333 case KVM_CREATE_VCPU:
1334 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1335 if (r < 0)
1336 goto out;
1337 break;
6fc138d2
IE
1338 case KVM_SET_USER_MEMORY_REGION: {
1339 struct kvm_userspace_memory_region kvm_userspace_mem;
1340
1341 r = -EFAULT;
1342 if (copy_from_user(&kvm_userspace_mem, argp,
1343 sizeof kvm_userspace_mem))
1344 goto out;
1345
1346 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
6aa8b732
AK
1347 if (r)
1348 goto out;
1349 break;
1350 }
1351 case KVM_GET_DIRTY_LOG: {
1352 struct kvm_dirty_log log;
1353
1354 r = -EFAULT;
2f366987 1355 if (copy_from_user(&log, argp, sizeof log))
6aa8b732 1356 goto out;
2c6f5df9 1357 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6aa8b732
AK
1358 if (r)
1359 goto out;
1360 break;
1361 }
5f94c174
LV
1362#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1363 case KVM_REGISTER_COALESCED_MMIO: {
1364 struct kvm_coalesced_mmio_zone zone;
1365 r = -EFAULT;
1366 if (copy_from_user(&zone, argp, sizeof zone))
1367 goto out;
1368 r = -ENXIO;
1369 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1370 if (r)
1371 goto out;
1372 r = 0;
1373 break;
1374 }
1375 case KVM_UNREGISTER_COALESCED_MMIO: {
1376 struct kvm_coalesced_mmio_zone zone;
1377 r = -EFAULT;
1378 if (copy_from_user(&zone, argp, sizeof zone))
1379 goto out;
1380 r = -ENXIO;
1381 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1382 if (r)
1383 goto out;
1384 r = 0;
1385 break;
1386 }
1387#endif
f17abe9a 1388 default:
1fe779f8 1389 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
f17abe9a
AK
1390 }
1391out:
1392 return r;
1393}
1394
e4a533a4 1395static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
f17abe9a 1396{
777b3f49
MT
1397 struct page *page[1];
1398 unsigned long addr;
1399 int npages;
1400 gfn_t gfn = vmf->pgoff;
f17abe9a 1401 struct kvm *kvm = vma->vm_file->private_data;
f17abe9a 1402
777b3f49
MT
1403 addr = gfn_to_hva(kvm, gfn);
1404 if (kvm_is_error_hva(addr))
e4a533a4 1405 return VM_FAULT_SIGBUS;
777b3f49
MT
1406
1407 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1408 NULL);
1409 if (unlikely(npages != 1))
e4a533a4 1410 return VM_FAULT_SIGBUS;
777b3f49
MT
1411
1412 vmf->page = page[0];
e4a533a4 1413 return 0;
f17abe9a
AK
1414}
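/*
 * This is the open-coded form referred to in the commit title: the fault
 * handler behind mmap() of a VM fd (gfn == vmf->pgoff) resolves the page
 * with gfn_to_hva() + get_user_pages() directly instead of going through
 * gfn_to_page().  The version it replaces looked roughly like the sketch
 * below (reconstructed for comparison only, not part of this file); the
 * open-coded variant never has to special-case the shared bad_page:
 *
 *	page = gfn_to_page(kvm, vmf->pgoff);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return VM_FAULT_SIGBUS;
 *	}
 *	vmf->page = page;
 *	return 0;
 */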
1415
1416static struct vm_operations_struct kvm_vm_vm_ops = {
e4a533a4 1417 .fault = kvm_vm_fault,
f17abe9a
AK
1418};
1419
1420static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1421{
1422 vma->vm_ops = &kvm_vm_vm_ops;
1423 return 0;
1424}
1425
5c502742 1426static const struct file_operations kvm_vm_fops = {
f17abe9a
AK
1427 .release = kvm_vm_release,
1428 .unlocked_ioctl = kvm_vm_ioctl,
1429 .compat_ioctl = kvm_vm_ioctl,
1430 .mmap = kvm_vm_mmap,
1431};
1432
1433static int kvm_dev_ioctl_create_vm(void)
1434{
2030a42c 1435 int fd;
f17abe9a
AK
1436 struct kvm *kvm;
1437
f17abe9a 1438 kvm = kvm_create_vm();
d6d28168
AK
1439 if (IS_ERR(kvm))
1440 return PTR_ERR(kvm);
7d9dbca3 1441 fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
2030a42c 1442 if (fd < 0)
66c0b394 1443 kvm_put_kvm(kvm);
f17abe9a 1444
f17abe9a 1445 return fd;
f17abe9a
AK
1446}
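/*
 * kvm_dev_ioctl_create_vm() sits behind the KVM_CREATE_VM ioctl on
 * /dev/kvm: it builds the struct kvm and wraps it in an anonymous inode
 * whose fd is handed back to the caller.  A minimal userspace sketch of the
 * bring-up that leads here:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "KVM API version mismatch");
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */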
1447
1448static long kvm_dev_ioctl(struct file *filp,
1449 unsigned int ioctl, unsigned long arg)
1450{
07c45a36 1451 long r = -EINVAL;
f17abe9a
AK
1452
1453 switch (ioctl) {
1454 case KVM_GET_API_VERSION:
f0fe5108
AK
1455 r = -EINVAL;
1456 if (arg)
1457 goto out;
f17abe9a
AK
1458 r = KVM_API_VERSION;
1459 break;
1460 case KVM_CREATE_VM:
f0fe5108
AK
1461 r = -EINVAL;
1462 if (arg)
1463 goto out;
f17abe9a
AK
1464 r = kvm_dev_ioctl_create_vm();
1465 break;
018d00d2 1466 case KVM_CHECK_EXTENSION:
1e1c65e0 1467 r = kvm_dev_ioctl_check_extension(arg);
5d308f45 1468 break;
07c45a36
AK
1469 case KVM_GET_VCPU_MMAP_SIZE:
1470 r = -EINVAL;
1471 if (arg)
1472 goto out;
adb1ff46
AK
1473 r = PAGE_SIZE; /* struct kvm_run */
1474#ifdef CONFIG_X86
1475 r += PAGE_SIZE; /* pio data page */
5f94c174
LV
1476#endif
1477#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1478 r += PAGE_SIZE; /* coalesced mmio ring page */
adb1ff46 1479#endif
07c45a36 1480 break;
d4c9ff2d
FEL
1481 case KVM_TRACE_ENABLE:
1482 case KVM_TRACE_PAUSE:
1483 case KVM_TRACE_DISABLE:
1484 r = kvm_trace_ioctl(ioctl, arg);
1485 break;
6aa8b732 1486 default:
043405e1 1487 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
1488 }
1489out:
1490 return r;
1491}
1492
6aa8b732 1493static struct file_operations kvm_chardev_ops = {
6aa8b732
AK
1494 .unlocked_ioctl = kvm_dev_ioctl,
1495 .compat_ioctl = kvm_dev_ioctl,
6aa8b732
AK
1496};
1497
1498static struct miscdevice kvm_dev = {
bbe4432e 1499 KVM_MINOR,
6aa8b732
AK
1500 "kvm",
1501 &kvm_chardev_ops,
1502};
1503
1b6c0168
AK
1504static void hardware_enable(void *junk)
1505{
1506 int cpu = raw_smp_processor_id();
1507
1508 if (cpu_isset(cpu, cpus_hardware_enabled))
1509 return;
1510 cpu_set(cpu, cpus_hardware_enabled);
e9b11c17 1511 kvm_arch_hardware_enable(NULL);
1b6c0168
AK
1512}
1513
1514static void hardware_disable(void *junk)
1515{
1516 int cpu = raw_smp_processor_id();
1517
1518 if (!cpu_isset(cpu, cpus_hardware_enabled))
1519 return;
1520 cpu_clear(cpu, cpus_hardware_enabled);
e9b11c17 1521 kvm_arch_hardware_disable(NULL);
1b6c0168
AK
1522}
1523
774c47f1
AK
1524static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1525 void *v)
1526{
1527 int cpu = (long)v;
1528
1a6f4d7f 1529 val &= ~CPU_TASKS_FROZEN;
774c47f1 1530 switch (val) {
cec9ad27 1531 case CPU_DYING:
6ec8a856
AK
1532 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
1533 cpu);
1534 hardware_disable(NULL);
1535 break;
774c47f1 1536 case CPU_UP_CANCELED:
43934a38
JK
1537 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
1538 cpu);
8691e5a8 1539 smp_call_function_single(cpu, hardware_disable, NULL, 1);
774c47f1 1540 break;
43934a38
JK
1541 case CPU_ONLINE:
1542 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
1543 cpu);
8691e5a8 1544 smp_call_function_single(cpu, hardware_enable, NULL, 1);
774c47f1
AK
1545 break;
1546 }
1547 return NOTIFY_OK;
1548}
1549
4ecac3fd
AK
1550
1551asmlinkage void kvm_handle_fault_on_reboot(void)
1552{
1553 if (kvm_rebooting)
1554 /* spin while reset goes on */
1555 while (true)
1556 ;
1557 /* Fault while not rebooting. We want the trace. */
1558 BUG();
1559}
1560EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
1561
9a2b85c6 1562static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 1563 void *v)
9a2b85c6
RR
1564{
1565 if (val == SYS_RESTART) {
1566 /*
1567 * Some (well, at least mine) BIOSes hang on reboot if
1568 * in vmx root mode.
1569 */
1570 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
4ecac3fd 1571 kvm_rebooting = true;
15c8b6c1 1572 on_each_cpu(hardware_disable, NULL, 1);
9a2b85c6
RR
1573 }
1574 return NOTIFY_OK;
1575}
1576
1577static struct notifier_block kvm_reboot_notifier = {
1578 .notifier_call = kvm_reboot,
1579 .priority = 0,
1580};
1581
2eeb2e94
GH
1582void kvm_io_bus_init(struct kvm_io_bus *bus)
1583{
1584 memset(bus, 0, sizeof(*bus));
1585}
1586
1587void kvm_io_bus_destroy(struct kvm_io_bus *bus)
1588{
1589 int i;
1590
1591 for (i = 0; i < bus->dev_count; i++) {
1592 struct kvm_io_device *pos = bus->devs[i];
1593
1594 kvm_iodevice_destructor(pos);
1595 }
1596}
1597
92760499
LV
1598struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
1599 gpa_t addr, int len, int is_write)
2eeb2e94
GH
1600{
1601 int i;
1602
1603 for (i = 0; i < bus->dev_count; i++) {
1604 struct kvm_io_device *pos = bus->devs[i];
1605
92760499 1606 if (pos->in_range(pos, addr, len, is_write))
2eeb2e94
GH
1607 return pos;
1608 }
1609
1610 return NULL;
1611}
1612
1613void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
1614{
1615 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
1616
1617 bus->devs[bus->dev_count++] = dev;
1618}
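/*
 * kvm_io_bus_find_dev() dispatches an MMIO or PIO access to the first
 * registered device whose in_range() callback claims the (addr, len,
 * is_write) triple; registration is a plain append, so it is done at setup
 * time.  A sketch of how an emulated device might be wired onto the mmio
 * bus (MY_DEV_BASE/MY_DEV_SIZE and the embedding struct are illustrative):
 *
 *	static int my_dev_in_range(struct kvm_io_device *this,
 *				   gpa_t addr, int len, int is_write)
 *	{
 *		return addr >= MY_DEV_BASE && addr + len <= MY_DEV_BASE + MY_DEV_SIZE;
 *	}
 *	...
 *	dev->iodev.in_range = my_dev_in_range;
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->iodev);
 */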
1619
774c47f1
AK
1620static struct notifier_block kvm_cpu_notifier = {
1621 .notifier_call = kvm_cpu_hotplug,
1622 .priority = 20, /* must be > scheduler priority */
1623};
1624
8b88b099 1625static int vm_stat_get(void *_offset, u64 *val)
ba1389b7
AK
1626{
1627 unsigned offset = (long)_offset;
ba1389b7
AK
1628 struct kvm *kvm;
1629
8b88b099 1630 *val = 0;
ba1389b7
AK
1631 spin_lock(&kvm_lock);
1632 list_for_each_entry(kvm, &vm_list, vm_list)
8b88b099 1633 *val += *(u32 *)((void *)kvm + offset);
ba1389b7 1634 spin_unlock(&kvm_lock);
8b88b099 1635 return 0;
ba1389b7
AK
1636}
1637
1638DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
1639
8b88b099 1640static int vcpu_stat_get(void *_offset, u64 *val)
1165f5fe
AK
1641{
1642 unsigned offset = (long)_offset;
1165f5fe
AK
1643 struct kvm *kvm;
1644 struct kvm_vcpu *vcpu;
1645 int i;
1646
8b88b099 1647 *val = 0;
1165f5fe
AK
1648 spin_lock(&kvm_lock);
1649 list_for_each_entry(kvm, &vm_list, vm_list)
1650 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
fb3f0f51
RR
1651 vcpu = kvm->vcpus[i];
1652 if (vcpu)
8b88b099 1653 *val += *(u32 *)((void *)vcpu + offset);
1165f5fe
AK
1654 }
1655 spin_unlock(&kvm_lock);
8b88b099 1656 return 0;
1165f5fe
AK
1657}
1658
ba1389b7
AK
1659DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
1660
1661static struct file_operations *stat_fops[] = {
1662 [KVM_STAT_VCPU] = &vcpu_stat_fops,
1663 [KVM_STAT_VM] = &vm_stat_fops,
1664};
1165f5fe 1665
a16b043c 1666static void kvm_init_debug(void)
6aa8b732
AK
1667{
1668 struct kvm_stats_debugfs_item *p;
1669
76f7c879 1670 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6aa8b732 1671 for (p = debugfs_entries; p->name; ++p)
76f7c879 1672 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
1165f5fe 1673 (void *)(long)p->offset,
ba1389b7 1674 stat_fops[p->kind]);
6aa8b732
AK
1675}
1676
1677static void kvm_exit_debug(void)
1678{
1679 struct kvm_stats_debugfs_item *p;
1680
1681 for (p = debugfs_entries; p->name; ++p)
1682 debugfs_remove(p->dentry);
76f7c879 1683 debugfs_remove(kvm_debugfs_dir);
6aa8b732
AK
1684}
1685
59ae6c6b
AK
1686static int kvm_suspend(struct sys_device *dev, pm_message_t state)
1687{
4267c41a 1688 hardware_disable(NULL);
59ae6c6b
AK
1689 return 0;
1690}
1691
1692static int kvm_resume(struct sys_device *dev)
1693{
4267c41a 1694 hardware_enable(NULL);
59ae6c6b
AK
1695 return 0;
1696}
1697
1698static struct sysdev_class kvm_sysdev_class = {
af5ca3f4 1699 .name = "kvm",
59ae6c6b
AK
1700 .suspend = kvm_suspend,
1701 .resume = kvm_resume,
1702};
1703
1704static struct sys_device kvm_sysdev = {
1705 .id = 0,
1706 .cls = &kvm_sysdev_class,
1707};
1708
cea7bb21 1709struct page *bad_page;
35149e21 1710pfn_t bad_pfn;
6aa8b732 1711
15ad7146
AK
1712static inline
1713struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
1714{
1715 return container_of(pn, struct kvm_vcpu, preempt_notifier);
1716}
1717
1718static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
1719{
1720 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
1721
e9b11c17 1722 kvm_arch_vcpu_load(vcpu, cpu);
15ad7146
AK
1723}
1724
1725static void kvm_sched_out(struct preempt_notifier *pn,
1726 struct task_struct *next)
1727{
1728 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
1729
e9b11c17 1730 kvm_arch_vcpu_put(vcpu);
15ad7146
AK
1731}
1732
f8c16bba 1733int kvm_init(void *opaque, unsigned int vcpu_size,
c16f862d 1734 struct module *module)
6aa8b732
AK
1735{
1736 int r;
002c7f7c 1737 int cpu;
6aa8b732 1738
cb498ea2
ZX
1739 kvm_init_debug();
1740
f8c16bba
ZX
1741 r = kvm_arch_init(opaque);
1742 if (r)
d2308784 1743 goto out_fail;
cb498ea2
ZX
1744
1745 bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1746
1747 if (bad_page == NULL) {
1748 r = -ENOMEM;
1749 goto out;
1750 }
1751
35149e21
AL
1752 bad_pfn = page_to_pfn(bad_page);
1753
e9b11c17 1754 r = kvm_arch_hardware_setup();
6aa8b732 1755 if (r < 0)
d2308784 1756 goto out_free_0;
6aa8b732 1757
002c7f7c
YS
1758 for_each_online_cpu(cpu) {
1759 smp_call_function_single(cpu,
e9b11c17 1760 kvm_arch_check_processor_compat,
8691e5a8 1761 &r, 1);
002c7f7c 1762 if (r < 0)
d2308784 1763 goto out_free_1;
002c7f7c
YS
1764 }
1765
15c8b6c1 1766 on_each_cpu(hardware_enable, NULL, 1);
774c47f1
AK
1767 r = register_cpu_notifier(&kvm_cpu_notifier);
1768 if (r)
d2308784 1769 goto out_free_2;
6aa8b732
AK
1770 register_reboot_notifier(&kvm_reboot_notifier);
1771
59ae6c6b
AK
1772 r = sysdev_class_register(&kvm_sysdev_class);
1773 if (r)
d2308784 1774 goto out_free_3;
59ae6c6b
AK
1775
1776 r = sysdev_register(&kvm_sysdev);
1777 if (r)
d2308784 1778 goto out_free_4;
59ae6c6b 1779
c16f862d
RR
1780 /* A kmem cache lets us meet the alignment requirements of fx_save. */
1781 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
56919c5c
JP
1782 __alignof__(struct kvm_vcpu),
1783 0, NULL);
c16f862d
RR
1784 if (!kvm_vcpu_cache) {
1785 r = -ENOMEM;
d2308784 1786 goto out_free_5;
c16f862d
RR
1787 }
1788
6aa8b732
AK
1789 kvm_chardev_ops.owner = module;
1790
1791 r = misc_register(&kvm_dev);
1792 if (r) {
d77c26fc 1793 printk(KERN_ERR "kvm: misc device register failed\n");
6aa8b732
AK
1794 goto out_free;
1795 }
1796
15ad7146
AK
1797 kvm_preempt_ops.sched_in = kvm_sched_in;
1798 kvm_preempt_ops.sched_out = kvm_sched_out;
1799
c7addb90 1800 return 0;
6aa8b732
AK
1801
1802out_free:
c16f862d 1803 kmem_cache_destroy(kvm_vcpu_cache);
d2308784 1804out_free_5:
59ae6c6b 1805 sysdev_unregister(&kvm_sysdev);
d2308784 1806out_free_4:
59ae6c6b 1807 sysdev_class_unregister(&kvm_sysdev_class);
d2308784 1808out_free_3:
6aa8b732 1809 unregister_reboot_notifier(&kvm_reboot_notifier);
774c47f1 1810 unregister_cpu_notifier(&kvm_cpu_notifier);
d2308784 1811out_free_2:
15c8b6c1 1812 on_each_cpu(hardware_disable, NULL, 1);
d2308784 1813out_free_1:
e9b11c17 1814 kvm_arch_hardware_unsetup();
d2308784
ZX
1815out_free_0:
1816 __free_page(bad_page);
ca45aaae 1817out:
f8c16bba 1818 kvm_arch_exit();
cb498ea2 1819 kvm_exit_debug();
d2308784 1820out_fail:
6aa8b732
AK
1821 return r;
1822}
cb498ea2 1823EXPORT_SYMBOL_GPL(kvm_init);
6aa8b732 1824
cb498ea2 1825void kvm_exit(void)
6aa8b732 1826{
d4c9ff2d 1827 kvm_trace_cleanup();
6aa8b732 1828 misc_deregister(&kvm_dev);
c16f862d 1829 kmem_cache_destroy(kvm_vcpu_cache);
59ae6c6b
AK
1830 sysdev_unregister(&kvm_sysdev);
1831 sysdev_class_unregister(&kvm_sysdev_class);
6aa8b732 1832 unregister_reboot_notifier(&kvm_reboot_notifier);
59ae6c6b 1833 unregister_cpu_notifier(&kvm_cpu_notifier);
15c8b6c1 1834 on_each_cpu(hardware_disable, NULL, 1);
e9b11c17 1835 kvm_arch_hardware_unsetup();
f8c16bba 1836 kvm_arch_exit();
6aa8b732 1837 kvm_exit_debug();
cea7bb21 1838 __free_page(bad_page);
6aa8b732 1839}
cb498ea2 1840EXPORT_SYMBOL_GPL(kvm_exit);