/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * IRQfd - generic
 */
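/*
 * A virqfd ties a userspace-supplied eventfd to a device callback: the
 * poll wakeup runs the optional @handler in atomic context; if the
 * handler returns nonzero (or no handler was given) and a @thread
 * callback exists, the @thread runs later from the @inject work item.
 */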
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

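/*
 * Teardown is deferred to a dedicated workqueue: shutdown jobs are
 * queued from contexts that cannot sleep (the poll wakeup holds the
 * eventfd waitqueue lock), and virqfd_disable() relies on flushing
 * this queue to guarantee every outstanding release has finished.
 */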
static struct workqueue_struct *vfio_irqfd_cleanup_wq;

int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);

		/*
		 * The eventfd is closing; if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * vdev pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold wqh->lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
	}

	return 0;
}

static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}

static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&vdev->irqlock);

	if (*pvirqfd) {
		spin_unlock_irq(&vdev->irqlock);
		ret = -EBUSY;
		goto fail;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&vdev->irqlock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized;
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);

	return ret;
}
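
/*
 * Example pairing, as used by vfio_pci_set_intx_unmask() below:
 *
 *	virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
 *		      vfio_send_intx_eventfd, NULL,
 *		      &vdev->ctx[0].unmask, fd);
 *
 * The handler runs from the poll wakeup itself; the eventfd signal is
 * pushed off to a work item only when the handler asks for it by
 * returning nonzero.
 */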

static void virqfd_disable(struct vfio_pci_device *vdev,
			   struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when INTx delivery is not in use.  In that case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here; non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup;
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev, &vdev->ctx[0].unmask);
	virqfd_disable(vdev, &vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kcalloc(nvec, sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

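		/*
		 * Note: pci_enable_msix() may also return a positive value,
		 * the number of vectors that could have been allocated;
		 * that, too, is treated as failure and handed back to the
		 * caller.
		 */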
		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
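		 * e.g., nvec = 3: fls(3 * 2 - 1) - 1 = fls(5) - 1 = 2,
		 * advertising a maximum of 2^2 = 4 vectors.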
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int irq, ret;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	/* Only look up the IRQ once the vector is known to be in range. */
	irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/*
		 * Back out the vectors set up so far.  Compare against a
		 * signed start so j doesn't wrap when start is 0.
		 */
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev, &vdev->ctx[i].unmask);
		virqfd_disable(vdev, &vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev, &vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX);

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

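/*
 * Entry point from the VFIO_DEVICE_SET_IRQS ioctl: dispatch on the IRQ
 * index and the requested action, then hand the already-validated range
 * and data payload to the matching helper above.
 */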
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}