/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include "irq.h"

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

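/*
 * Map a host MSI-X interrupt number back to its index in the device's
 * host_msix_entries[] array; returns -1 if no entry matches.
 */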
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0)
		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");

	return index;
}

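/*
 * INTx handling is split between a hard IRQ handler and a threaded handler.
 * If the device supports PCI 2.3 INTx masking, kvm_assigned_dev_intx() runs
 * in hard IRQ context, masks the interrupt at device level and wakes the
 * threaded handler; otherwise the line is disabled at the interrupt
 * controller from the threaded handler.  In both cases the threaded handler
 * injects the interrupt into the guest, and the line is unmasked again once
 * the guest acks it (see kvm_assigned_dev_ack_irq()).
 */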
static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int ret;

	spin_lock(&assigned_dev->intx_lock);
	if (pci_check_and_mask_intx(assigned_dev->dev)) {
		assigned_dev->host_irq_disabled = true;
		ret = IRQ_WAKE_THREAD;
	} else
		ret = IRQ_NONE;
	spin_unlock(&assigned_dev->intx_lock);

	return ret;
}

static void
kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
				 int vector)
{
	if (unlikely(assigned_dev->irq_requested_type &
		     KVM_DEV_IRQ_GUEST_INTX)) {
		spin_lock(&assigned_dev->intx_mask_lock);
		if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id, vector, 1,
				    false);
		spin_unlock(&assigned_dev->intx_mask_lock);
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    vector, 1, false);
}

static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
		spin_lock_irq(&assigned_dev->intx_lock);
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
		spin_unlock_irq(&assigned_dev->intx_lock);
	}

	kvm_assigned_dev_raise_guest_irq(assigned_dev,
					 assigned_dev->guest_irq);

	return IRQ_HANDLED;
}

#ifdef __KVM_HAVE_MSI
static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
				       assigned_dev->irq_source_id,
				       assigned_dev->guest_irq, 1);
	return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

	kvm_assigned_dev_raise_guest_irq(assigned_dev,
					 assigned_dev->guest_irq);

	return IRQ_HANDLED;
}
#endif

#ifdef __KVM_HAVE_MSIX
static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int index = find_index_from_host_irq(assigned_dev, irq);
	u32 vector;
	int ret = 0;

	if (index >= 0) {
		vector = assigned_dev->guest_msix_entries[index].vector;
		ret = kvm_set_irq_inatomic(assigned_dev->kvm,
					   assigned_dev->irq_source_id,
					   vector, 1);
	}

	return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int index = find_index_from_host_irq(assigned_dev, irq);
	u32 vector;

	if (index >= 0) {
		vector = assigned_dev->guest_msix_entries[index].vector;
		kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
	}

	return IRQ_HANDLED;
}
#endif

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev =
		container_of(kian, struct kvm_assigned_dev_kernel,
			     ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);

	spin_lock(&dev->intx_mask_lock);

	if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
		bool reassert = false;

		spin_lock_irq(&dev->intx_lock);
		/*
		 * The guest IRQ may be shared so this ack can come from an
		 * IRQ for another guest device.
		 */
		if (dev->host_irq_disabled) {
			if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
				enable_irq(dev->host_irq);
			else if (!pci_check_and_unmask_intx(dev->dev))
				reassert = true;
			dev->host_irq_disabled = reassert;
		}
		spin_unlock_irq(&dev->intx_lock);

		if (reassert)
			kvm_set_irq(dev->kvm, dev->irq_source_id,
				    dev->guest_irq, 1, false);
	}

	spin_unlock(&dev->intx_mask_lock);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	if (assigned_dev->ack_notifier.gsi != -1)
		kvm_unregister_irq_ack_notifier(kvm,
						&assigned_dev->ack_notifier);

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 0, false);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * We disable the irq here to prevent further events.
	 *
	 * Note that this may result in a nested disable if the interrupt type
	 * is INTx, but that is fine since we are going to free it anyway.
	 *
	 * If this function is called as part of VM destruction, make sure
	 * that the kvm state is still valid at this point, since we may also
	 * have to wait on a currently running IRQ handler.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq(assigned_dev->host_msix_entries[i].vector);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		if ((assigned_dev->irq_requested_type &
		     KVM_DEV_IRQ_HOST_INTX) &&
		    (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
			spin_lock_irq(&assigned_dev->intx_lock);
			pci_intx(assigned_dev->dev, false);
			spin_unlock_irq(&assigned_dev->intx_lock);
			synchronize_irq(assigned_dev->host_irq);
		} else
			disable_irq(assigned_dev->host_irq);

		free_irq(assigned_dev->host_irq, assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);
	if (pci_load_and_free_saved_state(assigned_dev->dev,
					  &assigned_dev->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&assigned_dev->dev->dev));
	else
		pci_restore_state(assigned_dev->dev);

	assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

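/*
 * Host-side INTx setup: register the (possibly shared) interrupt handlers
 * and, if PCI 2.3 masking is used, re-enable INTx generation on the device.
 */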
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	irq_handler_t irq_handler;
	unsigned long flags;

	dev->host_irq = dev->dev->irq;

	/*
	 * We can only share the IRQ line with other host devices if we are
	 * able to disable the IRQ source at device-level - independently of
	 * the guest driver. Otherwise host devices may suffer from unbounded
	 * IRQ latencies when the guest keeps the line asserted.
	 */
	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
		irq_handler = kvm_assigned_dev_intx;
		flags = IRQF_SHARED;
	} else {
		irq_handler = NULL;
		flags = IRQF_ONESHOT;
	}
	if (request_threaded_irq(dev->host_irq, irq_handler,
				 kvm_assigned_dev_thread_intx, flags,
				 dev->irq_name, dev))
		return -EIO;

	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
		spin_lock_irq(&dev->intx_lock);
		pci_intx(dev->dev, true);
		spin_unlock_irq(&dev->intx_lock);
	}
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
				 kvm_assigned_dev_thread_msi, 0,
				 dev->irq_name, dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 kvm_assigned_dev_msix,
					 kvm_assigned_dev_thread_msix,
					 0, dev->irq_name, dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, dev);
	pci_disable_msix(dev->dev);
	return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	return 0;
}
#endif

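/* Set up the host side of an assigned interrupt (INTx, MSI or MSI-X). */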
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
		 pci_name(dev->dev));

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}
	dev->host_irq_disabled = false;

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		if (dev->ack_notifier.gsi != -1)
			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;
	unsigned long irq_type;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
					  KVM_DEV_IRQ_GUEST_MASK);
	r = kvm_deassign_irq(kvm, match, irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

/*
 * We want to test whether the caller has been granted permissions to
 * use this device. To be able to configure and control the device,
 * the user needs access to PCI configuration space and BAR resources.
 * These are accessed through PCI sysfs. PCI config space is often
 * passed to the process calling this ioctl via file descriptor, so we
 * can't rely on access to that file. We can check for permissions
 * on each of the BAR resource files, which is a pretty clear
 * indicator that the user has been granted access to the device.
 */
static int probe_sysfs_permissions(struct pci_dev *dev)
{
#ifdef CONFIG_SYSFS
	int i;
	bool bar_found = false;

	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
		char *kpath, *syspath;
		struct path path;
		struct inode *inode;
		int r;

		if (!pci_resource_len(dev, i))
			continue;

		kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
		if (!kpath)
			return -ENOMEM;

		/* Per sysfs-rules, sysfs is always at /sys */
		syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
		kfree(kpath);
		if (!syspath)
			return -ENOMEM;

		r = kern_path(syspath, LOOKUP_FOLLOW, &path);
		kfree(syspath);
		if (r)
			return r;

		inode = path.dentry->d_inode;

		r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
		path_put(&path);
		if (r)
			return r;

		bar_found = true;
	}

	/* If no resources, probably something special */
	if (!bar_found)
		return -EPERM;

	return 0;
#else
	return -EINVAL; /* No way to control the device without sysfs */
#endif
}

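/*
 * KVM_ASSIGN_PCI_DEVICE: claim a host PCI device for this VM.  The device is
 * looked up by segment/bus/devfn, checked for sysfs access permissions,
 * enabled and reset, added to kvm->arch.assigned_dev_head and attached to
 * the VM's IOMMU domain.
 */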
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}

	/* Don't allow bridges to be assigned */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
		r = -EPERM;
		goto out_put;
	}

	r = probe_sysfs_permissions(dev);
	if (r)
		goto out_put;

	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);
	pci_save_state(dev);
	match->pci_saved_state = pci_store_saved_state(dev);
	if (!match->pci_saved_state)
		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
		       __func__, dev_name(&dev->dev));

	if (!pci_intx_mask_supported(dev))
		assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->intx_lock);
	spin_lock_init(&match->intx_mask_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (!kvm->arch.iommu_domain) {
		r = kvm_iommu_map_guest(kvm);
		if (r)
			goto out_list_del;
	}
	r = kvm_assign_device(kvm, match);
	if (r)
		goto out_list_del;

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&dev->dev));
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}


#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries =
			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
				GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry number twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
					 struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		r = -ENODEV;
		goto out;
	}

	spin_lock(&match->intx_mask_lock);

	match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
	match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;

	if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
			kvm_set_irq(match->kvm, match->irq_source_id,
				    match->guest_irq, 0, false);
			/*
			 * Masking at hardware-level is performed on demand,
			 * i.e. when an IRQ actually arrives at the host.
			 */
		} else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
			/*
			 * Unmask the IRQ line if required. Unmasking at
			 * device level will be performed by user space.
			 */
			spin_lock_irq(&match->intx_lock);
			if (match->host_irq_disabled) {
				enable_irq(match->host_irq);
				match->host_irq_disabled = false;
			}
			spin_unlock_irq(&match->intx_lock);
		}
	}

	spin_unlock(&match->intx_mask_lock);

out:
	mutex_unlock(&kvm->lock);
	return r;
}

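/*
 * Dispatcher for the device assignment ioctls: copy the argument structures
 * from user space and hand off to the helpers above.
 */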
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	case KVM_ASSIGN_SET_INTX_MASK: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
		break;
	}
	default:
		r = -ENOTTY;
		break;
	}
out:
	return r;
}
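
/*
 * Illustrative only (not part of this file): a minimal userspace sketch of
 * how a VMM might drive the ioctls above.  It assumes the uapi definitions
 * from <linux/kvm.h> and an already-created VM file descriptor; the device
 * address, assigned_dev_id handle and guest GSI are made-up example values.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int assign_example(int vm_fd)
{
	struct kvm_assigned_pci_dev dev = {
		.assigned_dev_id = 1,	/* handle chosen by the VMM */
		.segnr = 0,		/* PCI domain 0000 */
		.busnr = 1,		/* bus 01 */
		.devfn = 0,		/* device 00, function 0 */
		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
	};
	struct kvm_assigned_irq irq = {
		.assigned_dev_id = 1,
		.guest_irq = 24,	/* GSI the VMM has routed beforehand */
		.flags = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI,
	};

	/* Claim the device, then wire its interrupt as host MSI -> guest MSI. */
	if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
		return -1;
	return ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
}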