kvm/eventfd: factor out kvm_notify_acked_gsi()
virt/kvm/eventfd.c
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
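
/*
 * Illustrative sketch (not part of this file's logic): a level-triggered
 * consumer such as a VFIO-backed device would pair an irqfd with a
 * resamplefd roughly as follows from userspace.  The variable names and
 * the GSI number are assumptions for the example only:
 *
 *	int irq_fd = eventfd(0, EFD_CLOEXEC);
 *	int resample_fd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd assign = {
 *		.fd         = irq_fd,
 *		.resamplefd = resample_fd,
 *		.gsi        = 24,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &assign);
 *
 * Signaling irq_fd asserts the GSI; when the guest acks it, the ack
 * notifier above de-asserts the line and signals resample_fd so the
 * device can re-evaluate its (possibly still pending) level.
 */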

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
				    false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irqfd->consumer.token = (void *)irqfd->eventfd;
	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
	irqfd->consumer.start = kvm_arch_irq_bypass_start;
	ret = irq_bypass_register_consumer(&irqfd->consumer);
	if (ret)
		pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
#endif

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
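
/*
 * Illustrative userspace sketch (documentation only, not kernel code):
 * attaching and detaching an eventfd to a guest GSI, where vm_fd is a VM
 * file descriptor obtained with KVM_CREATE_VM and the GSI number is an
 * assumption for the example:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 32 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// assign
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));		// inject the interrupt
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// deassign
 */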

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
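
/*
 * Illustrative sketch (assumed userspace usage, not part of this file):
 * a virtio-style device model might ask KVM to signal an eventfd when the
 * guest writes the value 0 to a 2-byte MMIO doorbell.  The address below
 * is an assumption for the example:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd ioev = {
 *		.addr      = 0xfe003000,
 *		.len       = 2,
 *		.fd        = efd,
 *		.datamatch = 0,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
 *
 * The guest's write then completes without a heavyweight exit to
 * userspace; the device thread polling efd picks up the notification.
 */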

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}
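
/*
 * Worked example of the matching rules above (illustrative values): with
 * p->addr == 0x100, p->length == 2 and p->datamatch == 0xab, a 2-byte
 * guest write of 0xab to 0x100 is a hit, while a write of any other
 * value, another length, or another address misses.  With p->wildcard
 * set, any 2-byte write to 0x100 would hit regardless of the data.
 */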

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}