KVM: make kvm_unregister_irq_ack_notifier() safe
virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef CONFIG_X86
#include <asm/msidef.h>
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int msi2intx = 1;
module_param(msi2intx, bool, 0);

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT

#ifdef CONFIG_X86
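/*
 * Deliver a guest-programmed MSI for an assigned device: decode the
 * destination id, vector and delivery/trigger modes from the MSI
 * address/data words the guest wrote, then inject the interrupt
 * through the in-kernel (IO)APIC emulation, mirroring what hardware
 * MSI delivery would do.
 */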
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
{
	int vcpu_id;
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
	int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
			>> MSI_ADDR_DEST_ID_SHIFT;
	int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
			>> MSI_DATA_VECTOR_SHIFT;
	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
				(unsigned long *)&dev->guest_msi.address_lo);
	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
				(unsigned long *)&dev->guest_msi.data);
	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
				(unsigned long *)&dev->guest_msi.data);
	u32 deliver_bitmask;

	BUG_ON(!ioapic);

	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
				dest_id, dest_mode);
	/* IOAPIC delivery mode value is the same as MSI here */
	switch (delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
				deliver_bitmask);
		if (vcpu != NULL)
			kvm_apic_set_irq(vcpu, vector, trig_mode);
		else
			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
		break;
	case IOAPIC_FIXED:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				kvm_apic_set_irq(vcpu, vector, trig_mode);
		}
		break;
	default:
		printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
	}
}
#else
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
#endif

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);

	/* This is taken to safely inject irq inside the guest. When
	 * the interrupt injection (or the ioapic code) uses a
	 * finer-grained lock, update this
	 */
	mutex_lock(&assigned_dev->kvm->lock);
	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
		kvm_set_irq(assigned_dev->kvm,
			    assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);
	else if (assigned_dev->irq_requested_type &
				KVM_ASSIGNED_DEV_GUEST_MSI) {
		assigned_device_msi_dispatch(assigned_dev);
		enable_irq(assigned_dev->host_irq);
	}
	mutex_unlock(&assigned_dev->kvm->lock);
	kvm_put_kvm(assigned_dev->kvm);
}

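/*
 * Host-side interrupt handler for an assigned device. It runs in hard
 * irq context, where kvm->lock (a mutex) cannot be taken, so it only
 * pins the VM with kvm_get_kvm(), defers the actual injection to the
 * work handler above, and masks the host line until the guest acks.
 */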
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	kvm_get_kvm(assigned_dev->kvm);
	schedule_work(&assigned_dev->interrupt_work);
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);
	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
	enable_irq(dev->host_irq);
}

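/*
 * Teardown order matters here: the host irq is freed first so no new
 * interrupts arrive, then the ack notifier is unregistered (which, per
 * the change this file reflects, must be safe even for a notifier that
 * was never registered), and finally any queued work is cancelled; if
 * work was still pending we drop the reference the irq handler took on
 * its behalf.
 */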
static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested_type)
		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		pci_disable_msi(assigned_dev->dev);

	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
	kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);

	if (cancel_work_sync(&assigned_dev->interrupt_work))
		/* We had pending work. That means we will have to take
		 * care of kvm_put_kvm.
		 */
		kvm_put_kvm(kvm);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

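/*
 * The update_intx/update_msi helpers below reprogram how an assigned
 * device's interrupt reaches the guest. With the msi2intx module
 * parameter (on by default), a guest INTx may be backed by a host MSI
 * when the device supports it; the KVM_ASSIGNED_DEV_* bits in
 * irq_requested_type track which combination is currently wired up.
 */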
static int assigned_device_update_intx(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	adev->guest_irq = airq->guest_irq;
	adev->ack_notifier.gsi = airq->guest_irq;

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
		return 0;

	if (irqchip_in_kernel(kvm)) {
		if (!msi2intx &&
		    adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
			free_irq(adev->host_irq, (void *)kvm);
			pci_disable_msi(adev->dev);
		}

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		if (airq->host_irq)
			adev->host_irq = airq->host_irq;
		else
			adev->host_irq = adev->dev->irq;

		/* Even though this is PCI, we don't want to use shared
		 * interrupts. Sharing host devices with guest-assigned devices
		 * on the same interrupt line is not a happy situation: there
		 * are going to be long delays in accepting, acking, etc.
		 */
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
				0, "kvm_assigned_intx_device", (void *)adev))
			return -EIO;
	}

	adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
				   KVM_ASSIGNED_DEV_HOST_INTX;
	return 0;
}

#ifdef CONFIG_X86
static int assigned_device_update_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *adev,
			struct kvm_assigned_irq *airq)
{
	int r;

	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
		/* x86 doesn't care about the upper address of the guest
		 * MSI message address */
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
		adev->guest_msi.data = airq->guest_msi.data;
		adev->ack_notifier.gsi = -1;
	} else if (msi2intx) {
		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
		adev->guest_irq = airq->guest_irq;
		adev->ack_notifier.gsi = airq->guest_irq;
	}

	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
		return 0;

	if (irqchip_in_kernel(kvm)) {
		if (!msi2intx) {
			if (adev->irq_requested_type &
					KVM_ASSIGNED_DEV_HOST_INTX)
				free_irq(adev->host_irq, (void *)adev);

			r = pci_enable_msi(adev->dev);
			if (r)
				return r;
		}

		adev->host_irq = adev->dev->irq;
		if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
				"kvm_assigned_msi_device", (void *)adev))
			return -EIO;
	}

	if (!msi2intx)
		adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

	adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
	return 0;
}
#endif

static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq
				   *assigned_irq)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

	if (!match->irq_requested_type) {
		INIT_WORK(&match->interrupt_work,
				kvm_assigned_dev_interrupt_work_handler);
		if (irqchip_in_kernel(kvm)) {
			/* Register ack notifier */
			match->ack_notifier.gsi = -1;
			match->ack_notifier.irq_acked =
					kvm_assigned_dev_ack_irq;
			kvm_register_irq_ack_notifier(kvm,
					&match->ack_notifier);

			/* Request IRQ source ID */
			r = kvm_request_irq_source_id(kvm);
			if (r < 0)
				goto out_release;
			else
				match->irq_source_id = r;

#ifdef CONFIG_X86
			/* Determine host device irq type, we can know the
			 * result from dev->msi_enabled */
			if (msi2intx)
				pci_enable_msi(match->dev);
#endif
		}
	}

	if ((!msi2intx &&
	     (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
	    (msi2intx && match->dev->msi_enabled)) {
#ifdef CONFIG_X86
		r = assigned_device_update_msi(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"MSI device!\n");
			goto out_release;
		}
#else
		r = -ENOTTY;
#endif
	} else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
		/* Host device IRQ 0 means don't support INTx */
		if (!msi2intx) {
			printk(KERN_WARNING
			       "kvm: wait device to enable MSI!\n");
			r = 0;
		} else {
			printk(KERN_WARNING
			       "kvm: failed to enable MSI device!\n");
			r = -ENOTTY;
			goto out_release;
		}
	} else {
		/* Non-sharing INTx mode */
		r = assigned_device_update_intx(kvm, match, assigned_irq);
		if (r) {
			printk(KERN_WARNING "kvm: failed to enable "
					"INTx device!\n");
			goto out_release;
		}
	}

	mutex_unlock(&kvm->lock);
	return r;
out_release:
	mutex_unlock(&kvm->lock);
	kvm_free_assigned_device(kvm, match);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EINVAL;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->dev = dev;

	match->kvm = kvm;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		r = kvm_iommu_map_guest(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

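/*
 * Ask every vcpu that might hold stale translations to flush its TLB:
 * set KVM_REQ_TLB_FLUSH in vcpu->requests and kick the remote cpus
 * with an empty IPI (ack_flush). test_and_set_bit() avoids sending a
 * second IPI to a vcpu that already has the request pending.
 */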
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}


int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page = kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

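/*
 * Translate a gfn to a host pfn through the slot's userspace mapping.
 * get_user_pages_fast() covers ordinary anonymous and file-backed
 * pages; for VM_PFNMAP regions (e.g. device memory mapped into the
 * user address space) it fails, and the pfn is computed from the vma
 * layout instead.
 */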
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

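/*
 * Note on the wait protocol used below: prepare_to_wait() puts the
 * vcpu on its waitqueue before the wakeup conditions are re-checked,
 * so a wakeup arriving between the check and schedule() is not lost.
 */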
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu) ||
		    kvm_cpu_has_pending_timer(vcpu) ||
		    kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

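/*
 * Back the vcpu mmap with the right kernel page: offset 0 is the
 * kvm_run structure, followed (on x86) by the pio data page and, when
 * configured, the coalesced mmio ring. Compare KVM_GET_VCPU_MMAP_SIZE
 * in kvm_dev_ioctl(), which reports one page per enabled region.
 */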
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static const struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: with a NULL argp, sigset is
		 * uninitialized and the mask must be cleared instead */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static const struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

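/*
 * cpus_hardware_enabled tracks which cpus have the virtualization
 * extensions turned on, making hardware_enable()/hardware_disable()
 * idempotent so they can be called from the hotplug, reboot and module
 * paths alike.
 */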
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

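/*
 * Reboot handling: once kvm_rebooting is set, faults taken on
 * virtualization instructions spin here instead of BUG()ing, since the
 * reboot notifier below has already disabled the hardware under us.
 */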
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

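/*
 * kvm_io_bus: a flat table of in-kernel emulated devices (PIO or MMIO).
 * Lookup simply asks each registered device whether it claims the
 * address range being accessed.
 */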
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

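/*
 * Example (illustrative only, not part of this file): an in-kernel
 * device embeds a struct kvm_io_device, fills in its callbacks and
 * registers on the bus it serves, e.g.
 *
 *	my_dev->dev.in_range = my_dev_in_range;
 *	my_dev->dev.read     = my_dev_read;
 *	my_dev->dev.write    = my_dev_write;
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &my_dev->dev);
 *
 * Names prefixed my_ are hypothetical; see the coalesced MMIO and
 * ioapic/pic code for real users.
 */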
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

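/*
 * debugfs statistics: each stat is a u32 at a known offset inside
 * struct kvm or struct kvm_vcpu; the getters below sum that field
 * across all VMs (and all their vcpus) in the system.
 */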
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

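/*
 * Create/remove one read-only debugfs file per entry in the
 * arch-provided debugfs_entries[] table, all under a "kvm" directory
 * at the debugfs root.
 */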
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

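/*
 * Suspend/resume: hardware virtualization must be off while the
 * firmware is in control, so disable it on suspend and re-enable it
 * on resume.  These callbacks run on a single cpu, hence the direct
 * calls rather than IPIs.
 */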
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

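/*
 * Preempt notifiers let a vcpu's extended state follow its task: the
 * arch load/put hooks run whenever the thread that owns the vcpu is
 * scheduled in or out.
 */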
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

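/*
 * Module initialization.  Note the ordering: debugfs and arch init
 * first, then hardware enable on every online cpu, then the notifiers,
 * sysdev, vcpu cache and, last, the /dev/kvm misc device that makes
 * everything reachable from userspace.  The out_free_* labels unwind
 * in exactly the reverse order.  An arch module typically calls this
 * from its module_init, e.g. something like (illustrative):
 *
 *	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 */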
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;
#ifndef CONFIG_X86
	msi2intx = 0;
#endif

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

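/*
 * Tear down in the reverse order of kvm_init(), after first making
 * /dev/kvm unreachable.
 */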
void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);