/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

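/*
 * Look up an assigned device by its user-visible assigned_dev_id on the
 * per-VM list.  Returns NULL if no matching device has been registered.
 * Callers are expected to hold kvm->lock.
 */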
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

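/*
 * Map a host IRQ number back to the index of the MSI-X entry it was
 * requested for.  Logs a warning and falls back to index 0 if the
 * vector is not found in host_msix_entries.
 */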
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                    *assigned_dev, int irq)
{
        int i, index;
        struct msix_entry *host_msix_entries;

        host_msix_entries = assigned_dev->host_msix_entries;

        index = -1;
        for (i = 0; i < assigned_dev->entries_nr; i++)
                if (irq == host_msix_entries[i].vector) {
                        index = i;
                        break;
                }
        if (index < 0) {
                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
                return 0;
        }

        return index;
}

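/*
 * Threaded handler for a host interrupt of an assigned device.  For INTx
 * the host line is masked (and flagged as disabled) until the guest acks
 * the interrupt; the event is then injected into the guest via
 * kvm_set_irq(), using the matching MSI-X guest vector or guest_irq.
 */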
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
        u32 vector;
        int index;

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
                spin_lock(&assigned_dev->intx_lock);
                disable_irq_nosync(irq);
                assigned_dev->host_irq_disabled = true;
                spin_unlock(&assigned_dev->intx_lock);
        }

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                index = find_index_from_host_irq(assigned_dev, irq);
                if (index >= 0) {
                        vector = assigned_dev->
                                guest_msix_entries[index].vector;
                        kvm_set_irq(assigned_dev->kvm,
                                    assigned_dev->irq_source_id, vector, 1);
                }
        } else
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);

        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        spin_lock(&dev->intx_lock);
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
        spin_unlock(&dev->intx_lock);
}

static void deassign_guest_irq(struct kvm *kvm,
                               struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
        assigned_dev->ack_notifier.gsi = -1;

        kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                    assigned_dev->guest_irq, 0);

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;
        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
                              struct kvm_assigned_dev_kernel *assigned_dev)
{
        /*
         * We disable the irq here to prevent further events.
         *
         * Notice this may result in a nested disable if the interrupt type is
         * INTx, but that's OK since we are going to free it anyway.
         *
         * If this function is part of VM destruction, please ensure that the
         * kvm state is still valid at this point, since we may also have to
         * wait on a currently running IRQ handler.
         */
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int i;
                for (i = 0; i < assigned_dev->entries_nr; i++)
                        disable_irq(assigned_dev->host_msix_entries[i].vector);

                for (i = 0; i < assigned_dev->entries_nr; i++)
                        free_irq(assigned_dev->host_msix_entries[i].vector,
                                 (void *)assigned_dev);

                assigned_dev->entries_nr = 0;
                kfree(assigned_dev->host_msix_entries);
                kfree(assigned_dev->guest_msix_entries);
                pci_disable_msix(assigned_dev->dev);
        } else {
                /* Deal with MSI and INTx */
                disable_irq(assigned_dev->host_irq);

                free_irq(assigned_dev->host_irq, (void *)assigned_dev);

                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                        pci_disable_msi(assigned_dev->dev);
        }

        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

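/*
 * Tear down the host and/or guest side of an existing IRQ assignment,
 * according to which bits of irq_requested_type are passed in.
 */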
static int kvm_deassign_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *assigned_dev,
                            unsigned long irq_requested_type)
{
        unsigned long guest_irq_type, host_irq_type;

        if (!irqchip_in_kernel(kvm))
                return -EINVAL;
        /* no irq assignment to deassign */
        if (!assigned_dev->irq_requested_type)
                return -ENXIO;

        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

        if (host_irq_type)
                deassign_host_irq(kvm, assigned_dev);
        if (guest_irq_type)
                deassign_guest_irq(kvm, assigned_dev);

        return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        __pci_reset_function(assigned_dev->dev);
        pci_restore_state(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        dev->host_irq = dev->dev->irq;
        /* Even though this is PCI, we don't want to use shared
         * interrupts. Sharing host devices with guest-assigned devices
         * on the same interrupt line is not a happy situation: there
         * are going to be long delays in accepting, acking, etc.
         */
        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
                                 IRQF_ONESHOT, dev->irq_name, (void *)dev))
                return -EIO;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
{
        int r;

        if (!dev->dev->msi_enabled) {
                r = pci_enable_msi(dev->dev);
                if (r)
                        return r;
        }

        dev->host_irq = dev->dev->irq;
        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
                                 0, dev->irq_name, (void *)dev)) {
                pci_disable_msi(dev->dev);
                return -EIO;
        }

        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        int i, r = -EINVAL;

        /* host_msix_entries and guest_msix_entries should have been
         * initialized */
        if (dev->entries_nr == 0)
                return r;

        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;

        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
                                         NULL, kvm_assigned_dev_thread,
                                         0, dev->irq_name, (void *)dev);
                if (r)
                        goto err;
        }

        return 0;
err:
        for (i -= 1; i >= 0; i--)
                free_irq(dev->host_msix_entries[i].vector, (void *)dev);
        pci_disable_msix(dev->dev);
        return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = irq->guest_irq;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

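/*
 * Request the host-side interrupt (INTx, MSI or MSI-X) for an assigned
 * device and record the chosen type.  Returns -EEXIST if a host IRQ
 * type has already been set up for this device.
 */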
static int assign_host_irq(struct kvm *kvm,
                           struct kvm_assigned_dev_kernel *dev,
                           __u32 host_irq_type)
{
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
                return r;

        snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
                 pci_name(dev->dev));

        switch (host_irq_type) {
        case KVM_DEV_IRQ_HOST_INTX:
                r = assigned_device_enable_host_intx(kvm, dev);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_HOST_MSI:
                r = assigned_device_enable_host_msi(kvm, dev);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_HOST_MSIX:
                r = assigned_device_enable_host_msix(kvm, dev);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r)
                dev->irq_requested_type |= host_irq_type;

        return r;
}

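/*
 * Set up the guest-side routing for an assigned IRQ: allocate an IRQ
 * source id, record the guest GSI and register the ack notifier.
 * Returns -EEXIST if a guest IRQ type has already been set up.
 */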
static int assign_guest_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *dev,
                            struct kvm_assigned_irq *irq,
                            unsigned long guest_irq_type)
{
        int id;
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
                return r;

        id = kvm_request_irq_source_id(kvm);
        if (id < 0)
                return id;

        dev->irq_source_id = id;

        switch (guest_irq_type) {
        case KVM_DEV_IRQ_GUEST_INTX:
                r = assigned_device_enable_guest_intx(kvm, dev, irq);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_GUEST_MSI:
                r = assigned_device_enable_guest_msi(kvm, dev, irq);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_GUEST_MSIX:
                r = assigned_device_enable_guest_msix(kvm, dev, irq);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r) {
                dev->irq_requested_type |= guest_irq_type;
                kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
        } else
                kvm_free_irq_source_id(kvm, dev->irq_source_id);

        return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = -EINVAL;
        struct kvm_assigned_dev_kernel *match;
        unsigned long host_irq_type, guest_irq_type;

        if (!irqchip_in_kernel(kvm))
                return r;

        mutex_lock(&kvm->lock);
        r = -ENODEV;
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

        r = -EINVAL;
        /* can only assign one type at a time */
        if (hweight_long(host_irq_type) > 1)
                goto out;
        if (hweight_long(guest_irq_type) > 1)
                goto out;
        if (host_irq_type == 0 && guest_irq_type == 0)
                goto out;

        r = 0;
        if (host_irq_type)
                r = assign_host_irq(kvm, match, host_irq_type);
        if (r)
                goto out;

        if (guest_irq_type)
                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
                                         struct kvm_assigned_irq
                                         *assigned_irq)
{
        int r = -ENODEV;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

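/*
 * Handle KVM_ASSIGN_PCI_DEVICE: look up the host PCI device, enable it,
 * claim its regions, reset and save its state, then add it to the VM's
 * assigned-device list and, if requested, attach it to the IOMMU domain.
 */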
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EEXIST;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                          assigned_dev->busnr,
                                          assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);
        pci_save_state(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_segnr = assigned_dev->segnr;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->flags = assigned_dev->flags;
        match->dev = dev;
        spin_lock_init(&match->intx_lock);
        match->irq_source_id = -1;
        match->kvm = kvm;
        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        pci_restore_state(dev);
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (!match) {
                printk(KERN_INFO "%s: device hasn't been assigned before, "
                       "so cannot be deassigned\n", __func__);
                r = -EINVAL;
                goto out;
        }

        if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
                kvm_deassign_device(kvm, match);

        kvm_free_assigned_device(kvm, match);

out:
        mutex_unlock(&kvm->lock);
        return r;
}


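/*
 * The two handlers below implement the MSI-X configuration ioctls:
 * KVM_ASSIGN_SET_MSIX_NR fixes the number of MSI-X entries (once per
 * device) and allocates the host/guest entry arrays, while
 * KVM_ASSIGN_SET_MSIX_ENTRY records the mapping from an MSI-X table
 * entry to the guest GSI.
 */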
#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
                                    struct kvm_assigned_msix_nr *entry_nr)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry_nr->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_nr_out;
        }

        if (adev->entries_nr == 0) {
                adev->entries_nr = entry_nr->entry_nr;
                if (adev->entries_nr == 0 ||
                    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
                        r = -EINVAL;
                        goto msix_nr_out;
                }

                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
                                                  entry_nr->entry_nr,
                                                  GFP_KERNEL);
                if (!adev->host_msix_entries) {
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
                adev->guest_msix_entries =
                        kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
                                GFP_KERNEL);
                if (!adev->guest_msix_entries) {
                        kfree(adev->host_msix_entries);
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
        } else /* Not allowed to set the MSI-X number twice */
                r = -EINVAL;
msix_nr_out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
                                       struct kvm_assigned_msix_entry *entry)
{
        int r = 0, i;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry->assigned_dev_id);

        if (!adev) {
                r = -EINVAL;
                goto msix_entry_out;
        }

        for (i = 0; i < adev->entries_nr; i++)
                if (adev->guest_msix_entries[i].vector == 0 ||
                    adev->guest_msix_entries[i].entry == entry->entry) {
                        adev->guest_msix_entries[i].entry = entry->entry;
                        adev->guest_msix_entries[i].vector = entry->gsi;
                        adev->host_msix_entries[i].entry = entry->entry;
                        break;
                }
        if (i == adev->entries_nr) {
                r = -ENOSPC;
                goto msix_entry_out;
        }

msix_entry_out:
        mutex_unlock(&kvm->lock);

        return r;
}
#endif

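/*
 * Dispatcher for the VM ioctls related to device assignment.  Each case
 * copies its argument structure from user space and forwards it to the
 * matching handler above; unknown ioctls return -ENOTTY.
 */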
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                r = -EOPNOTSUPP;
                break;
        }
        case KVM_ASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_CAP_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                r = -ENOMEM;
                entries = vmalloc(routing.nr * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -EFAULT;
                urouting = argp;
                if (copy_from_user(entries, urouting->entries,
                                   routing.nr * sizeof(*entries)))
                        goto out_free_irq_routing;
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
        out_free_irq_routing:
                vfree(entries);
                break;
        }
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
                r = -EFAULT;
                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
                        goto out;
                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_SET_MSIX_ENTRY: {
                struct kvm_assigned_msix_entry entry;
                r = -EFAULT;
                if (copy_from_user(&entry, argp, sizeof entry))
                        goto out;
                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
                if (r)
                        goto out;
                break;
        }
#endif
        default:
                r = -ENOTTY;
                break;
        }
out:
        return r;
}