/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

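/*
 * Device lookup helper.  All callers below take kvm->lock before walking
 * the per-VM assigned_dev_head list, so the list cannot change under us.
 */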
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

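/*
 * Map a host MSI-X interrupt line back to its index in host_msix_entries,
 * so the handler can look up the matching guest vector.  Returns -1 (and
 * warns) if no entry matches.
 */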
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	/*
	 * Return the (negative) index on failure as well, so the caller's
	 * "index >= 0" check actually filters failed lookups instead of
	 * silently injecting entry 0's vector.
	 */
	if (index < 0)
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");

	return index;
}

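/*
 * Threaded handler for host interrupts of assigned devices.  For INTx we
 * keep the (possibly level-triggered) host line disabled until the guest
 * acks the interrupt, which kvm_assigned_dev_ack_irq() below undoes; for
 * MSI/MSI-X no such masking is needed.  The injected GSI is either the
 * per-vector guest MSI-X entry or the single guest_irq.
 */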
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	u32 vector;
	int index;

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
		spin_lock(&assigned_dev->intx_lock);
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
		spin_unlock(&assigned_dev->intx_lock);
	}

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		index = find_index_from_host_irq(assigned_dev, irq);
		if (index >= 0) {
			vector = assigned_dev->
				guest_msix_entries[index].vector;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id, vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev =
		container_of(kian, struct kvm_assigned_dev_kernel,
			     ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock(&dev->intx_lock);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock(&dev->intx_lock);
}

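/*
 * Tear down the guest side of an IRQ assignment: unregister the ack
 * notifier, pull the line down in case it was left asserted, and release
 * the IRQ source id.
 */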
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	if (assigned_dev->ack_notifier.gsi != -1)
		kvm_unregister_irq_ack_notifier(kvm,
						&assigned_dev->ack_notifier);

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 0);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/*
 * This function implicitly synchronizes with the IRQ handler: free_irq()
 * waits for any currently running handler thread to finish.
 */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * We disable the irq here to prevent further events.
	 *
	 * Notice this may result in a nested disable if the interrupt type
	 * is INTx, but that's OK since we are going to free it anyway.
	 *
	 * If this function is part of VM destruction, please ensure that the
	 * kvm state is still valid at this point, because we may also have
	 * to wait on a currently running IRQ handler.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq(assigned_dev->host_msix_entries[i].vector);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq(assigned_dev->host_irq);

		free_irq(assigned_dev->host_irq, assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

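/*
 * An assignment has a host half (the requested host IRQ) and a guest half
 * (the injected GSI); irq_requested_type carries both masks, and either
 * half can be torn down independently.
 */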
static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);
	if (pci_load_and_free_saved_state(assigned_dev->dev,
					  &assigned_dev->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&assigned_dev->dev->dev));
	else
		pci_restore_state(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

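/*
 * The assigned_device_enable_host_*() helpers request the host-side
 * interrupt; their assigned_device_enable_guest_*() counterparts further
 * below only record how the interrupt is to be injected.
 */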
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 IRQF_ONESHOT, dev->irq_name, dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 0, dev->irq_name, dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

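/*
 * For MSI-X, one host interrupt is requested per entry previously set up
 * via KVM_ASSIGN_SET_MSIX_NR/KVM_ASSIGN_SET_MSIX_ENTRY.
 */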
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 NULL, kvm_assigned_dev_thread,
					 0, dev->irq_name, dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, dev);
	pci_disable_msix(dev->dev);
	return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

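/*
 * Request the host-side interrupt for the type userspace asked for.  Only
 * one host IRQ type can be active at a time (-EEXIST otherwise).  The
 * irq_name buffer lives in the assigned-dev structure because the string
 * must stay valid for as long as the interrupt remains requested.
 */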
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
		 pci_name(dev->dev));

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

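/*
 * The guest half allocates an IRQ source id so that this device's
 * contribution to a (possibly shared) guest line can be set and cleared
 * independently of other injectors.  Only INTx registers an ack notifier;
 * MSI/MSI-X set ack_notifier.gsi to -1, so no notifier is registered.
 */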
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		if (dev->ack_notifier.gsi != -1)
			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

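/*
 * KVM_ASSIGN_DEV_IRQ: at most one host type and one guest type may be
 * requested per call (the hweight checks below); host setup runs first,
 * so a guest-side failure leaves the host IRQ requested but nothing
 * injected.
 */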
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

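/*
 * KVM_ASSIGN_PCI_DEVICE: claim the host device (enable it and request its
 * regions), reset it and stash its config state so it can be restored on
 * deassignment, then optionally attach it to the VM's IOMMU domain.
 */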
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);
	pci_save_state(dev);
	match->pci_saved_state = pci_store_saved_state(dev);
	if (!match->pci_saved_state)
		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
		       __func__, dev_name(&dev->dev));
	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->intx_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&dev->dev));
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}

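/*
 * MSI-X setup protocol: userspace first fixes the number of vectors with
 * KVM_ASSIGN_SET_MSIX_NR (at most KVM_MAX_MSIX_PER_DEV, and only once per
 * device), then routes each entry to a guest GSI with
 * KVM_ASSIGN_SET_MSIX_ENTRY, and finally enables delivery through
 * KVM_ASSIGN_DEV_IRQ.
 */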
#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries =
			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
				GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry count twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

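/*
 * Dispatcher for the device-assignment VM ioctls.  A minimal userspace
 * sketch of a full INTx assignment (hypothetical bus/device numbers,
 * error handling omitted) would look roughly like:
 *
 *	struct kvm_assigned_pci_dev dev = {
 *		.assigned_dev_id = 0x100,	// caller-chosen id
 *		.busnr = 1, .devfn = 0,		// host PCI address
 *		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
 *
 *	struct kvm_assigned_irq irq = {
 *		.assigned_dev_id = 0x100,
 *		.guest_irq = 10,		// guest GSI
 *		.flags = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
 */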
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = -ENOTTY;
		break;
	}
out:
	return r;
}