/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

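/* Generate the interval-tree helpers (vhost_umem_interval_tree_insert(),
 * _remove() and _iter_first()) used below to look up umem/IOTLB nodes by
 * [start, last] address range.
 */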
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, , vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
		vq->is_le = true;
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = virtio_legacy_is_little_endian();
}

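/* Flushing works by queueing a vhost_flush_struct work item and waiting for
 * its completion: since the worker runs work items in queueing order, the
 * completion firing guarantees that all work queued before the flush has
 * already executed.
 */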
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
	init_waitqueue_head(&work->done);
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 */
		smp_mb();
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
}

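/* The per-device worker thread: it runs with the owner's mm made current
 * via use_mm() so it can access the guest memory mappings, and drains the
 * lockless work_list. llist_del_all() hands items back in LIFO order, so
 * the list is reversed to process work in queueing (FIFO) order.
 */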
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

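/* Attaching the worker to the owner's cgroups must happen from the worker
 * itself: a work item runs cgroup_attach_task_all(owner, current) in worker
 * context, so the freshly created kthread joins all cgroups of the process
 * that issued VHOST_SET_OWNER.
 */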
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

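/* Allocation helper for potentially large, virtually contiguous buffers:
 * try kzalloc() first and quietly fall back to vzalloc() when physically
 * contiguous pages are not available. Free with kvfree().
 */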
static void *vhost_kvzalloc(unsigned long size)
{
	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!n)
		n = vzalloc(size);
	return n;
}

struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}

static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
			       int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return 0;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return 0;

		if (!access_ok(VERIFY_WRITE, (void __user *)a,
			       node->size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb, so
		 * -EAGAIN should not happen in this case.
		 */
		/* TODO: more fast path */
		struct iov_iter t;
		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * can be accessed through the iotlb, so -EAGAIN
		 * should not happen in this case.
		 */
		/* TODO: more fast path */
		struct iov_iter f;
		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
				     void *addr, unsigned size)
{
	int ret;

	/* This function should be called after iotlb
	 * prefetch, which means we're sure that the vq
	 * can be accessed through the iotlb, so -EAGAIN
	 * should not happen in this case.
	 */
	/* TODO: more fast path */
	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

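/* IOTLB-aware accessors for single ring fields. Without a device IOTLB
 * these are plain __put_user()/__get_user(); with one, the target address
 * is translated first and must fall entirely within a single IOTLB entry,
 * since the access cannot be split across mappings.
 */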
#define vhost_put_user(vq, x, ptr) \
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_user(vq, x, ptr) \
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock(&d->vqs[i]->mutex);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

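/* Insert a new umem/IOTLB node covering [start, end]. The list is kept in
 * insertion order, so when the IOTLB reaches max_iotlb_entries the entry at
 * the list head is the oldest one and gets evicted (FIFO replacement).
 */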
static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}

static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}

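/* An IOTLB update may satisfy translations that virtqueues are blocked on:
 * scan the pending miss requests and re-queue the poll work of any vq whose
 * missed iova is covered by the newly inserted range.
 */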
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 > vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static int umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return -EFAULT;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok(VERIFY_READ, (void __user *)a, size))
		return -EFAULT;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
		return -EFAULT;
	return 0;
}

int vhost_process_iotlb_msg(struct vhost_dev *dev,
			    struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_msg_node node;
	unsigned size = sizeof(struct vhost_msg);
	size_t ret;
	int err;

	if (iov_iter_count(from) < size)
		return 0;
	ret = copy_from_iter(&node.msg, size, from);
	if (ret != size)
		goto done;

	switch (node.msg.type) {
	case VHOST_IOTLB_MSG:
		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
		if (err)
			ret = err;
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

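/* Read side of the userspace IOTLB channel: block (unless noblock) until a
 * miss request is queued on read_list, copy it out, and park the node on
 * pending_list so a later VHOST_IOTLB_UPDATE can wake the waiting vq.
 */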
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		ret = copy_to_iter(&node->msg, size, to);

		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}

		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;

	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
	if (!node)
		return -ENOMEM;

	msg = &node->msg.iotlb;
	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

static int iotlb_access_ok(struct vhost_virtqueue *vq,
			   int access, u64 addr, u64 len)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   addr + len - 1);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;
		s += size;
		addr += size;
	}

	return true;
}

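/* Called before the vq is used: walk the descriptor, avail and used ring
 * address ranges through the device IOTLB so the hot-path accessors do not
 * have to handle -EAGAIN. Returns 0 (and queues miss requests) when a
 * translation is still missing, 1 when everything is mapped.
 */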
int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       num * sizeof *vq->desc) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       sizeof *vq->avail +
			       num * sizeof *vq->avail->ring + s) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       sizeof *vq->used +
			       num * sizeof *vq->used->ring + s);
}
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (vq->iotlb) {
		/* When a device IOTLB is used, access is validated
		 * during prefetching instead.
		 */
		return 1;
	}
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}

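/* Replace the device memory table: copy the userspace description, rebuild
 * it as a vhost_umem with one node per region, validate it against the
 * current log settings, and only then publish it to every vq under its
 * mutex. The old table is freed after the switch.
 */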
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_umem_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_new_umem_range(newumem,
					 region->guest_phys_addr,
					 region->memory_size,
					 region->guest_phys_addr +
					 region->memory_size - 1,
					 region->userspace_addr,
					 VHOST_ACCESS_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}

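/* Per-virtqueue ioctls: ring size, base index, ring addresses and the
 * kick/call/error eventfds. Runs under the vq mutex; old files and eventfd
 * contexts are released only after the new ones are installed, and kick
 * polling is stopped before and flushed after the change.
 */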
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

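/* Install a fresh (empty) device IOTLB and point every vq at it; the old
 * IOTLB, including any stale translations, is freed once all vqs have been
 * switched over under their mutexes.
 */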
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_umem *niotlb, *oiotlb;
	int i;

	niotlb = vhost_umem_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->iotlb = niotlb;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	vhost_umem_clean(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

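/* Dirty log layout: one bit per VHOST_PAGE_SIZE page of guest memory,
 * starting at log_base in the owner's address space. log_write() sets the
 * bit for every page touched by a write of write_length bytes at
 * write_address (a guest physical address).
 */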
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			   &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			   vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data) {
		vhost_reset_is_le(vq);
		return 0;
	}

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

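/* Translate a guest/IOVA range into host-userspace iovecs using the umem
 * interval tree (or the device IOTLB when one is attached). Returns the
 * number of iovecs filled, -ENOBUFS if iov_size is too small, -EPERM on a
 * permission mismatch, or -EAGAIN after queueing an IOTLB miss request.
 */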
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_umem_node *node;
	struct vhost_dev *dev = vq->dev;
	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(node->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, desc->next);
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

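/* Walk an indirect descriptor table: translate the table itself into
 * vq->indirect iovecs, then iterate its entries, translating each buffer
 * and accumulating out/in iovec counts (and dirty-log entries for writable
 * buffers) just like the direct path in vhost_get_vq_desc().
 */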
47283bef 1814static int get_indirect(struct vhost_virtqueue *vq,
7b3384fc
MT
1815 struct iovec iov[], unsigned int iov_size,
1816 unsigned int *out_num, unsigned int *in_num,
1817 struct vhost_log *log, unsigned int *log_num,
1818 struct vring_desc *indirect)
3a4d5c94
MT
1819{
1820 struct vring_desc desc;
1821 unsigned int i = 0, count, found = 0;
3b1bbe89 1822 u32 len = vhost32_to_cpu(vq, indirect->len);
aad9a1ce 1823 struct iov_iter from;
6b1e6cc7 1824 int ret, access;
3a4d5c94
MT
1825
1826 /* Sanity check */
3b1bbe89 1827 if (unlikely(len % sizeof desc)) {
3a4d5c94
MT
1828 vq_err(vq, "Invalid length in indirect descriptor: "
1829 "len 0x%llx not multiple of 0x%zx\n",
3b1bbe89 1830 (unsigned long long)len,
3a4d5c94
MT
1831 sizeof desc);
1832 return -EINVAL;
1833 }
1834
3b1bbe89 1835 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
6b1e6cc7 1836 UIO_MAXIOV, VHOST_ACCESS_RO);
7b3384fc 1837 if (unlikely(ret < 0)) {
6b1e6cc7
JW
1838 if (ret != -EAGAIN)
1839 vq_err(vq, "Translation failure %d in indirect.\n", ret);
3a4d5c94
MT
1840 return ret;
1841 }
aad9a1ce 1842 iov_iter_init(&from, READ, vq->indirect, ret, len);
3a4d5c94
MT
1843
1844 /* We will use the result as an address to read from, so most
1845 * architectures only need a compiler barrier here. */
1846 read_barrier_depends();
1847
3b1bbe89 1848 count = len / sizeof desc;
3a4d5c94
MT
1849 /* Buffers are chained via a 16 bit next field, so
1850 * we can have at most 2^16 of these. */
7b3384fc 1851 if (unlikely(count > USHRT_MAX + 1)) {
3a4d5c94
MT
1852 vq_err(vq, "Indirect buffer length too big: %d\n",
1853 indirect->len);
1854 return -E2BIG;
1855 }
1856
1857 do {
1858 unsigned iov_count = *in_num + *out_num;
7b3384fc 1859 if (unlikely(++found > count)) {
3a4d5c94
MT
1860 vq_err(vq, "Loop detected: last one at %u "
1861 "indirect size %u\n",
1862 i, count);
1863 return -EINVAL;
1864 }
aad9a1ce
AV
1865 if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
1866 sizeof(desc))) {
3a4d5c94 1867 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
3b1bbe89 1868 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
3a4d5c94
MT
1869 return -EINVAL;
1870 }
3b1bbe89 1871 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
3a4d5c94 1872 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
3b1bbe89 1873 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
3a4d5c94
MT
1874 return -EINVAL;
1875 }
1876
6b1e6cc7
JW
1877 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
1878 access = VHOST_ACCESS_WO;
1879 else
1880 access = VHOST_ACCESS_RO;
1881
3b1bbe89
MT
1882 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1883 vhost32_to_cpu(vq, desc.len), iov + iov_count,
6b1e6cc7 1884 iov_size - iov_count, access);
7b3384fc 1885 if (unlikely(ret < 0)) {
6b1e6cc7
JW
1886 if (ret != -EAGAIN)
1887 vq_err(vq, "Translation failure %d indirect idx %d\n",
1888 ret, i);
3a4d5c94
MT
1889 return ret;
1890 }
1891 /* If this is an input descriptor, increment that count. */
6b1e6cc7 1892 if (access == VHOST_ACCESS_WO) {
3a4d5c94
MT
1893 *in_num += ret;
1894 if (unlikely(log)) {
3b1bbe89
MT
1895 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1896 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
3a4d5c94
MT
1897 ++*log_num;
1898 }
1899 } else {
1900 /* If it's an output descriptor, they're all supposed
1901 * to come before any input descriptors. */
7b3384fc 1902 if (unlikely(*in_num)) {
3a4d5c94
MT
1903 vq_err(vq, "Indirect descriptor "
1904 "has out after in: idx %d\n", i);
1905 return -EINVAL;
1906 }
1907 *out_num += ret;
1908 }
3b1bbe89 1909 } while ((i = next_desc(vq, &desc)) != -1);
3a4d5c94
MT
1910 return 0;
1911}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output descriptors followed by some number of input descriptors,
 * it's actually two iovecs, but we pack them into one and note how many of
 * each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_user(vq, ring_head,
		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are neither input nor output descriptors. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
					   sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
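
/* Editor's illustration (not part of vhost.c): a minimal sketch of how a
 * backend such as vhost-net typically drives vhost_get_vq_desc().  The
 * function name handle_vq_sketch() and the zero 'len' passed on completion
 * are hypothetical; the return-value handling follows the contract
 * documented above: negative means error, vq->num means "no buffer
 * available", and anything else is a descriptor head to consume. */
static void handle_vq_sketch(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head;

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;		/* error was already reported via vq_err() */
		if (head == vq->num) {
			/* Ring empty: re-enable guest kicks, then re-check to
			 * close the race with a concurrently added buffer. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... process vq->iov[0..out+in) here ... */
		vhost_add_used_and_signal(dev, vq, head, 0 /* bytes written */);
	}
}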

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (count == 1) {
		if (vhost_put_user(vq, heads[0].id, &used->id)) {
			vq_err(vq, "Failed to write used id");
			return -EFAULT;
		}
		if (vhost_put_user(vq, heads[0].len, &used->len)) {
			vq_err(vq, "Failed to write used len");
			return -EFAULT;
		}
	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			   &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
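
/* Editor's note (illustrative, not in the original source): a worked example
 * of the wrap-around split above.  With vq->num = 8, last_used_idx = 6 and
 * count = 4, start = 6 and n = 2: __vhost_add_used_n() first writes
 * heads[0..1] into used->ring[6..7], then heads[2..3] into used->ring[0..1].
 * Only afterwards is used->idx advanced (6 -> 10) in a single store, so the
 * guest never observes a partially published batch. */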

static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;
	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

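/* Editor's note (illustrative, not in the original source): with
 * VIRTIO_RING_F_EVENT_IDX, vring_need_event() from
 * include/uapi/linux/virtio_ring.h decides the signalling above via the
 * modulo-2^16 test
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * i.e. "did used->idx cross the guest's used_event mark while moving from
 * old to new?".  Example: old = 10, new = 13, event_idx = 11 gives 1 < 3,
 * so the guest asked to be woken once entry 11 was consumed and we must
 * signal; with event_idx = 14 the test is 65534 < 3, so no interrupt. */
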
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
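
/* Editor's illustration (not part of vhost.c): batching completions in the
 * style of vhost-net's receive path.  complete_batch_sketch() and its
 * 64-entry on-stack array are hypothetical; the point is that a caller can
 * collect vring_used_elem entries and publish them all with a single
 * used->idx update and at most one guest interrupt. */
static void complete_batch_sketch(struct vhost_dev *dev,
				  struct vhost_virtqueue *vq)
{
	struct vring_used_elem heads[64];
	unsigned int nheads = 0;
	unsigned int out, in;
	int head;

	while (nheads < ARRAY_SIZE(heads)) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0 || head == vq->num)
			break;
		/* ... copy data into the buffer described by vq->iov ... */
		heads[nheads].id = cpu_to_vhost32(vq, head);
		heads[nheads].len = cpu_to_vhost32(vq, 0 /* bytes written */);
		nheads++;
	}
	if (nheads)
		vhost_add_used_and_signal_n(dev, vq, heads, nheads);
}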

/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
	if (r)
		return false;

	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

3a4d5c94 2237/* OK, now we need to know about added descriptors. */
8ea8cf89 2238bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3a4d5c94 2239{
3b1bbe89 2240 __virtio16 avail_idx;
3a4d5c94 2241 int r;
d47effe1 2242
3a4d5c94
MT
2243 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2244 return false;
2245 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
ea16c514 2246 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2723feaa 2247 r = vhost_update_used_flags(vq);
8ea8cf89
MT
2248 if (r) {
2249 vq_err(vq, "Failed to enable notification at %p: %d\n",
2250 &vq->used->flags, r);
2251 return false;
2252 }
2253 } else {
2723feaa 2254 r = vhost_update_avail_event(vq, vq->avail_idx);
8ea8cf89
MT
2255 if (r) {
2256 vq_err(vq, "Failed to update avail event index at %p: %d\n",
2257 vhost_avail_event(vq), r);
2258 return false;
2259 }
2260 }
3a4d5c94
MT
2261 /* They could have slipped one in as we were doing that: make
2262 * sure it's written, then check again. */
5659338c 2263 smp_mb();
bfe2bc51 2264 r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
3a4d5c94
MT
2265 if (r) {
2266 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2267 &vq->avail->idx, r);
2268 return false;
2269 }
2270
3b1bbe89 2271 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
3a4d5c94 2272}
6ac1afbf 2273EXPORT_SYMBOL_GPL(vhost_enable_notify);
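
/* Editor's note (illustrative, not in the original source): the smp_mb() in
 * vhost_enable_notify() closes a classic lost-wakeup race.  Without it,
 *
 *	host: clear VRING_USED_F_NO_NOTIFY	guest: publish new buffer
 *	host: read avail->idx (stale)		guest: read used flags (stale,
 *						       still says "don't kick")
 *
 * could leave a buffer queued with no kick ever sent.  The barrier plus the
 * re-read of avail->idx guarantee the caller either sees the new buffer
 * (the function returns true) or the guest sees notifications enabled. */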

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
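
/* Editor's illustration (not part of the original block): the pattern these
 * message helpers serve.  On a device-IOTLB miss, code earlier in this file
 * builds a VHOST_IOTLB_MISS message and queues it on dev->read_list for
 * userspace to pick up; report_iotlb_miss_sketch() is a renamed sketch of
 * that flow. */
static int report_iotlb_miss_sketch(struct vhost_virtqueue *vq,
				    u64 iova, int access)
{
	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MISS);

	if (!node)
		return -ENOMEM;

	node->msg.iotlb.type = VHOST_IOTLB_MISS;
	node->msg.iotlb.iova = iova;
	node->msg.iotlb.perm = access;
	vhost_enqueue_msg(vq->dev, &vq->dev->read_list, node);
	return 0;
}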

static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");