KVM: Add coalesced MMIO support (common part)
virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

bool kvm_rebooting;

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

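/*
 * Ask every vcpu to flush its TLB on the next guest entry: set
 * KVM_REQ_TLB_FLUSH on each vcpu and IPI the CPUs currently running
 * one, so no vcpu keeps using stale translations.
 */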
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        ++kvm->stat.remote_tlb_flush;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

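/*
 * Same scheme as kvm_flush_remote_tlbs(), but requests a full MMU
 * context reload (KVM_REQ_MMU_RELOAD) on every vcpu.
 */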
void kvm_reload_remote_mmus(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

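/*
 * Common vcpu setup: initialize the mutex and wait queue, allocate the
 * page backing the userspace-mmap'ed struct kvm_run, then hand off to
 * the architecture code.
 */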
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

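/*
 * Create a VM: the architecture allocates struct kvm, the generic code
 * sets up locks, the io buses and, when supported, the page backing
 * the coalesced MMIO ring.
 */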
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                kfree(kvm);
                return ERR_PTR(-ENOMEM);
        }
        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

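/*
 * Tear the VM down once the last reference is dropped; the reverse of
 * kvm_create_vm().
 */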
static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

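/*
 * VM lifetime is reference counted: the VM fd and every vcpu fd hold a
 * reference, and kvm_destroy_vm() runs when the count reaches zero.
 */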
void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                *memslot = old;
                goto out_free;
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

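/*
 * Copy a memslot's dirty bitmap out to userspace and report through
 * @is_dirty whether any bit is set; clearing the bitmap is left to the
 * (architecture-specific) caller.
 */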
int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

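/*
 * Linear scan of the memslot array; the __ variant expects an already
 * unaliased gfn.
 */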
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * Requires current->mm->mmap_sem to be held
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

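        /*
         * get_user_pages() found no struct page: the slot may sit on a
         * raw PFN mapping (VM_PFNMAP), e.g. memory mapped straight from
         * a device, so compute the pfn from the vma instead.
         */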
        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, addr);
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                BUG_ON(pfn_valid(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (pfn_valid(pfn))
                return pfn_to_page(pfn);

        WARN_ON(!pfn_valid(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (pfn_valid(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (pfn_valid(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

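/*
 * Clamp an access of @len bytes starting at page offset @offset to the
 * end of the current page; used to split accesses that cross pages.
 */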
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

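/*
 * Set the bit for @gfn in its slot's dirty bitmap, if dirty logging is
 * enabled for that slot.
 */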
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_cpu_has_interrupt(vcpu))
                        break;
                if (kvm_cpu_has_pending_timer(vcpu))
                        break;
                if (kvm_arch_vcpu_runnable(vcpu))
                        break;
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

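/*
 * Back the vcpu mmap() area: page 0 is struct kvm_run; further
 * well-known offsets (the x86 pio page, the coalesced MMIO ring) are
 * handed out when the corresponding feature is compiled in.
 */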
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static const struct file_operations kvm_vcpu_fops = {
        .release = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl = kvm_vcpu_ioctl,
        .mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (fd < 0)
                kvm_put_kvm(vcpu->kvm);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                goto vcpu_destroy;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                /* pass p, not &sigset: a NULL argp means "clear the mask" */
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

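/*
 * ioctls on the VM file descriptor: vcpu creation, memory slot setup,
 * dirty logging and (optionally) coalesced MMIO zones.
 */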
static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
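        /*
         * Coalesced MMIO lets userspace mark zones (e.g. a framebuffer)
         * whose writes need no immediate action; the kernel then logs
         * each write into the ring page mapped at
         * KVM_COALESCED_MMIO_PAGE_OFFSET instead of exiting, and
         * userspace drains the ring later.  Roughly (a sketch with
         * hypothetical names and error handling omitted; the struct
         * layouts live in <linux/kvm.h>):
         *
         *      struct kvm_coalesced_mmio_zone zone = {
         *              .addr = mmio_base, .size = mmio_size,
         *      };
         *      ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
         *      ...
         *      while (ring->first != ring->last) {
         *              handle(&ring->coalesced_mmio[ring->first]);
         *              ring->first = (ring->first + 1) %
         *                            KVM_COALESCED_MMIO_MAX;
         *      }
         */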
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;

                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;

                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm *kvm = vma->vm_file->private_data;
        struct page *page;

        if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
                return VM_FAULT_SIGBUS;
        page = gfn_to_page(kvm, vmf->pgoff);
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return VM_FAULT_SIGBUS;
        }
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static const struct file_operations kvm_vm_fops = {
        .release = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl = kvm_vm_ioctl,
        .mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
        if (fd < 0)
                kvm_put_kvm(kvm);

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension(arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
                break;
        case KVM_TRACE_ENABLE:
        case KVM_TRACE_PAUSE:
        case KVM_TRACE_DISABLE:
                r = kvm_trace_ioctl(ioctl, arg);
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

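/*
 * Enable/disable virtualization extensions on the current CPU, using
 * cpus_hardware_enabled to make sure each CPU is toggled exactly once.
 */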
static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_disable(NULL);
}

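/*
 * CPU hotplug callback: virtualization must be switched off before a
 * CPU goes away and back on when it returns.
 */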
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
        if (kvm_rebooting)
                /* spin while reset goes on */
                while (true)
                        ;
        /* Fault while not rebooting.  We want the trace. */
        BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                kvm_rebooting = true;
                on_each_cpu(hardware_disable, NULL, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

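/*
 * A kvm_io_bus is a small fixed-size array of in-kernel devices (e.g.
 * the coalesced MMIO device) matched against guest physical addresses.
 */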
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
                                          gpa_t addr, int len, int is_write)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr, len, is_write))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

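/*
 * debugfs statistics: each file sums one counter, located at a fixed
 * offset into struct kvm or struct kvm_vcpu, across all VMs.
 */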
static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                *val += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM] = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

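/*
 * Module init entry point, called by the architecture module with its
 * opaque ops pointer and vcpu structure size.
 */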
int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_pfn = page_to_pfn(bad_page);

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                         kvm_arch_check_processor_compat,
                                         &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        on_each_cpu(hardware_enable, NULL, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
        on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
        kvm_exit_debug();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        kvm_trace_cleanup();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);