Use Little Endian for Dirty Bitmap

virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

static bool largepages_enabled = true;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex via cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and then cancelled;
	 * 2. the work callback was executed.
	 *
	 * The first case ensures that the irq is disabled and no more
	 * events will happen. But in the second case the irq may still be
	 * enabled (e.g. for MSI), so we disable the irq here to prevent
	 * further events.
	 *
	 * Note this may result in a nested disable if the interrupt type is
	 * INTx, but that's fine since we are going to free it.
	 *
	 * If this function is called as part of VM destruction, ensure that
	 * the kvm state is still valid at this point, since we may also
	 * have to wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

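/*
 * Illustrative usage sketch (not part of this file): code that needs to
 * touch vcpu state brackets the access with vcpu_load()/vcpu_put(), e.g.
 *
 *	vcpu_load(vcpu);
 *	... examine or modify guest register state ...
 *	vcpu_put(vcpu);
 *
 * so the vcpu context is pulled onto the calling CPU and the preempt
 * notifier is armed while the state is live; kvm_vcpu_block() below
 * drops and re-takes the vcpu around schedule() the same way.
 */
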
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_LIST_HEAD(&kvm->irq_routing);
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
		(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

AK
1084static int kvm_vm_release(struct inode *inode, struct file *filp)
1085{
1086 struct kvm *kvm = filp->private_data;
1087
721eecbf
GH
1088 kvm_irqfd_release(kvm);
1089
d39f13b0 1090 kvm_put_kvm(kvm);
6aa8b732
AK
1091 return 0;
1092}
1093
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

1286 struct kvm_userspace_memory_region *mem,
1287 int user_alloc)
1288{
1289 int r;
1290
72dc67a6 1291 down_write(&kvm->slots_lock);
f78e0e2e 1292 r = __kvm_set_memory_region(kvm, mem, user_alloc);
72dc67a6 1293 up_write(&kvm->slots_lock);
f78e0e2e
SY
1294 return r;
1295}
210c7c4d
IE
1296EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1297
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

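/*
 * Illustrative userspace sketch (not part of this file): a VMM fetches
 * the log for a slot through the KVM_GET_DIRTY_LOG ioctl on a VM fd,
 * supplying a buffer of ALIGN(npages, BITS_PER_LONG) / 8 bytes
 * (vm_fd and bitmap are hypothetical names):
 *
 *	struct kvm_dirty_log log;
 *
 *	memset(&log, 0, sizeof(log));
 *	log.slot = 0;
 *	log.dirty_bitmap = bitmap;
 *	ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * The arch ioctl handler typically clears the in-kernel bitmap after
 * the copy, so successive calls report pages dirtied since the
 * previous call.
 */
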
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

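/*
 * Worked example (hypothetical slot): with base_gfn = 0x100 and
 * userspace_addr = 0x7f0000000000, gfn 0x105 falls 5 pages into the
 * slot, so gfn_to_hva() returns 0x7f0000000000 + 5 * PAGE_SIZE
 * (0x7f0000005000 with 4 KiB pages).
 */
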
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

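/*
 * Worked example (hypothetical values, 4 KiB pages): a 6 KiB read at
 * gpa 0x1800 starts at offset 0x800 into gfn 1, so next_segment()
 * yields a 2 KiB copy from gfn 1 followed by a 4 KiB copy from gfn 2;
 * kvm_write_guest() below splits writes the same way.
 */
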
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

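/*
 * Because the bits are set with the generic little-endian helpers, the
 * dirty bitmap has the same memory layout on big- and little-endian
 * hosts: page @n lives at bit (n % 8) of byte (n / 8). An illustrative
 * userspace check (hypothetical helper, not part of this file):
 *
 *	static int page_is_dirty(const unsigned char *bitmap,
 *				 unsigned long n)
 *	{
 *		return bitmap[n / 8] & (1 << (n % 8));
 *	}
 */
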
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

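/*
 * Illustrative sketch (kvm_fd and vcpu_fd are hypothetical descriptors
 * for /dev/kvm and a vcpu): userspace maps the vcpu pages in one go,
 * with page 0 holding struct kvm_run and the PIO and coalesced-MMIO
 * pages at the fixed offsets handled by kvm_vcpu_fault() above:
 *
 *	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, 0);
 */
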
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

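/*
 * Illustrative sketch (vm_fd is a hypothetical descriptor from
 * KVM_CREATE_VM): userspace reaches this path with
 *
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 * where the argument is the vcpu id; a second call with the same id
 * fails with -EEXIST per the check above.
 */
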
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
			sizeof(struct kvm_guest_msix_entry) *
			entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X number twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;
6aa8b732
AK
2037
2038 r = -EFAULT;
2f366987 2039 if (copy_from_user(&dbg, argp, sizeof dbg))
6aa8b732 2040 goto out;
d0bfb940 2041 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
6aa8b732
AK
2042 if (r)
2043 goto out;
2044 r = 0;
2045 break;
2046 }
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: a NULL argp must clear the mask
		 * rather than install an uninitialized one */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
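
/*
 * For orientation, a minimal userspace driver of the vcpu fd looks
 * roughly like this (a sketch; mmap_size comes from
 * KVM_GET_VCPU_MMAP_SIZE on /dev/kvm, error handling omitted):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_HLT)
 *			break;
 *		// dispatch on run->exit_reason (MMIO, PIO, ...)
 *	}
 */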

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
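
/*
 * Illustrative userspace pairing for KVM_SET_USER_MEMORY_REGION and
 * KVM_GET_DIRTY_LOG above (a sketch; vm_fd, guest_ram, ram_size and
 * bitmap are hypothetical, error handling omitted):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size = ram_size,
 *		.userspace_addr = (__u64)guest_ram,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *	...
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	// one bit per page in the slot; the bitmap is little-endian
 */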

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}
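
/*
 * The fault handler above maps mmap() offsets on the VM fd to guest
 * frame numbers: vmf->pgoff is treated as a gfn, translated to the
 * backing userspace address, and pinned with get_user_pages() (write=1,
 * so the mapping is read-write). A gfn outside any memslot presumably
 * yields SIGBUS via the error-hva check.
 */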

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;    /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
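
/*
 * The misc device surfaces as /dev/kvm; the typical userspace bootstrap
 * is (a sketch, error handling omitted):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */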

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
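
/*
 * Note the asymmetry above: CPU_DYING is delivered on the dying CPU
 * itself, so hardware_disable() can run directly, while CPU_UP_CANCELED
 * and CPU_ONLINE arrive on another CPU and must cross-call with
 * smp_call_function_single().
 */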

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs at shutdown.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}
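
/*
 * Removal above is order-destroying by design: the last device is
 * swapped into the vacated slot, keeping unregistration O(1) at the
 * cost of device ordering on the bus. Callers are assumed not to rely
 * on ordering, and readers hold slots_lock, so no concurrent scan can
 * observe the intermediate state.
 */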

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
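
/*
 * The stat getters work by pointer arithmetic: debugfs_entries records
 * each counter as a byte offset into struct kvm or struct kvm_vcpu, and
 * the getter sums that u32 across every VM (and every vcpu) on vm_list.
 */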

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
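
/*
 * These hooks let per-vcpu hardware state follow the task: when the
 * scheduler preempts a vcpu thread, kvm_arch_vcpu_put() unloads its
 * state, and kvm_arch_vcpu_load() reloads it on whichever CPU the
 * thread next runs on, which may differ from the one it left.
 */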

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
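
/*
 * kvm_init() is invoked from the arch module's init path; on x86 with
 * VMX, for instance, roughly (a sketch, exact names may differ by tree):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 *	module_init(vmx_init);
 */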

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);