/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>

#include "vhost.h"

enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};
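
/* The poll helpers below let a virtqueue wait on a file (an eventfd or a
 * socket) without blocking: vhost_poll_func hooks us into the file's wait
 * queue, and vhost_poll_wakeup queues the associated work item on the
 * device's worker thread whenever an event matching poll->mask fires. */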

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;
	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;

	vhost_work_init(&poll->work, fn);
}

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	remove_wait_queue(poll->wqh, &poll->wait);
}
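
/* Flushing works by sequence numbers: queue_seq counts how many times a
 * work item has been queued, done_seq records the last queueing the worker
 * has completed.  A flusher snapshots queue_seq, bumps the flushing count
 * so the worker knows to signal work->done, and sleeps until done_seq
 * catches up with the snapshot. */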

static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int left;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, ({
		   spin_lock_irq(&dev->work_lock);
		   left = seq - work->done_seq <= 0;
		   spin_unlock_irq(&dev->work_lock);
		   left;
	}));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}

static inline void vhost_work_queue(struct vhost_dev *dev,
				    struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		wake_up_process(dev->worker);
	}
	spin_unlock_irqrestore(&dev->work_lock, flags);
}

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->vhost_hlen = 0;
	vq->sock_hlen = 0;
	vq->private_data = NULL;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
}
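
/* The worker thread: each pass picks the first queued work item off
 * dev->work_list, runs its callback, then publishes done_seq (waking any
 * flushers) before looking for more work.  It sleeps when the list is
 * empty and exits when kthread_stop is called. */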

static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
		} else
			schedule();
	}
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
					       UIO_MAXIOV, GFP_KERNEL);
		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
					  GFP_KERNEL);
		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
					    UIO_MAXIOV, GFP_KERNEL);

		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
		    !dev->vqs[i].heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i) {
		kfree(dev->vqs[i].indirect);
		kfree(dev->vqs[i].log);
		kfree(dev->vqs[i].heads);
	}
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		kfree(dev->vqs[i].indirect);
		dev->vqs[i].indirect = NULL;
		kfree(dev->vqs[i].log);
		dev->vqs[i].log = NULL;
		kfree(dev->vqs[i].heads);
		dev->vqs[i].heads = NULL;
	}
}

long vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue *vqs, int nvqs)
{
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].log = NULL;
		dev->vqs[i].indirect = NULL;
		dev->vqs[i].heads = NULL;
		dev->vqs[i].dev = dev;
		mutex_init(&dev->vqs[i].mutex);
		vhost_vq_reset(dev, dev->vqs + i);
		if (dev->vqs[i].handle_kick)
			vhost_poll_init(&dev->vqs[i].poll,
					dev->vqs[i].handle_kick, POLLIN, dev);
	}

	return 0;
}

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;
	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}
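
/* The worker must live in the owner's cgroups, and only the worker itself
 * can be "current" for that attachment.  So we queue a work item that runs
 * the cgroup attach from worker context and flush to wait for its result. */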

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;
	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (dev->mm) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}

/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
	struct vhost_memory *memory;

	/* Restore memory to default empty mapping. */
	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
	if (!memory)
		return -ENOMEM;

	vhost_dev_cleanup(dev);

	memory->nregions = 0;
	RCU_INIT_POINTER(dev->memory, memory);
	return 0;
}

/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(rcu_dereference_protected(dev->memory,
					lockdep_is_held(&dev->mutex)));
	RCU_INIT_POINTER(dev->memory, NULL);
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
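
/* Dirty-page logging uses a userspace bitmap with one bit per
 * VHOST_PAGE_SIZE page of guest memory: the bit for guest address addr is
 * bit (addr / VHOST_PAGE_SIZE) % 8 of the byte at
 * log_base + addr / VHOST_PAGE_SIZE / 8.  As a worked example (assuming
 * the usual 4K VHOST_PAGE_SIZE), a write to guest address 0x2100 dirties
 * page 2, i.e. bit 2 of the first byte of the bitmap. */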

static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;
	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		mutex_lock(&d->vqs[i].mutex);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i].private_data)
			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
						 log_all);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i].mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

static int vq_access_ok(unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	struct vhost_memory *mp;

	mp = rcu_dereference_protected(dev->memory,
				       lockdep_is_held(&dev->mutex));
	return memory_access_ok(dev, mp, 1);
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
	struct vhost_memory *mp;

	mp = rcu_dereference_protected(vq->dev->memory,
				       lockdep_is_held(&vq->mutex));
	return vq_memory_access_ok(log_base, mp,
			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
						sizeof *vq->used +
						vq->num * sizeof *vq->used->ring));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
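
/* Swap in a new memory table.  Readers (translate_desc) access dev->memory
 * under rcu_read_lock, so after validating the new table we publish it with
 * rcu_assign_pointer and wait out a grace period with synchronize_rcu
 * before freeing the old one. */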

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kfree(newmem);
		return -EFAULT;
	}

	if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = rcu_dereference_protected(d->memory,
					   lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);
	synchronize_rcu();
	kfree(oldmem);
	return 0;
}

static int init_used(struct vhost_virtqueue *vq,
		     struct vring_used __user *used)
{
	int r = put_user(vq->used_flags, &used->flags);
	if (r)
		return r;
	return get_user(vq->last_used_idx, &used->idx);
}

static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL,
		    *pollstart = NULL, *pollstop = NULL;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs + idx;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		r = init_used(vq, (struct vring_used __user *)(unsigned long)
			      a.used_user_addr);
		if (r)
			break;
		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = filep = vq->kick;
			pollstart = vq->kick = eventfp;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs + i;
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i].mutex);
			d->vqs[i].log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i].mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = vhost_set_vring(d, ioctl, argp);
		break;
	}
done:
	return r;
}

static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
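
/* Walk a guest write one page at a time, setting the matching bit in the
 * log.  Once write_address has been divided by VHOST_PAGE_SIZE it is a
 * page index, so each iteration must advance it by one page, not by
 * VHOST_PAGE_SIZE bytes; write_length is padded by the offset into the
 * first page so a write straddling a page boundary dirties both pages. */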

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	int r;

	if (!write_length)
		return 0;
	/* Account for the offset into the first page. */
	write_length += write_address % VHOST_PAGE_SIZE;
	write_address /= VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_address / 8;
		int bit = write_address % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		/* write_address is a page index here: advance one page. */
		write_address += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}

static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	rcu_read_lock();

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	rcu_read_unlock();
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}
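
/* An indirect descriptor points at a table of struct vring_desc entries in
 * guest memory.  We translate the table itself into vq->indirect, then
 * iterate over it with memcpy_fromiovec much as we would over the main
 * descriptor ring: out entries first, then in (write) entries, with each
 * in entry also recorded in the dirty log when logging is enabled. */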

static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (unlikely(indirect->len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
					      sizeof desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(get_user(head,
			      &vq->avail->ring[last_avail_idx % vq->num]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;
	return head;
}

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem __user *used;

	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	return 0;
}

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	int start;

	start = vq->last_used_idx % vq->num;
	used = vq->used->ring + start;
	if (copy_to_user(used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	vq->last_used_idx += count;
	return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx % vq->num;
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 flags;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (get_user(flags, &vq->avail->flags)) {
		vq_err(vq, "Failed to get flags");
		return;
	}

	/* If they don't want an interrupt, don't signal, unless empty. */
	if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
	    (vq->avail_idx != vq->last_avail_idx ||
	     !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
		return;

	/* Signal the Guest tell them we used something up. */
	if (vq->call_ctx)
		eventfd_signal(vq->call_ctx, 1);
}

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
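
/* Guest/host notification is suppressed cooperatively: the guest sets
 * VRING_AVAIL_F_NO_INTERRUPT when it doesn't need a call (checked in
 * vhost_signal above), and we set VRING_USED_F_NO_NOTIFY in our used flags
 * when we don't need a kick.  Re-enabling must re-check the avail index
 * under a barrier, since the guest may have added buffers meanwhile. */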

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_virtqueue *vq)
{
	u16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	r = put_user(vq->used_flags, &vq->used->flags);
	if (r) {
		vq_err(vq, "Failed to enable notification at %p: %d\n",
		       &vq->used->flags, r);
		return false;
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = get_user(avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return avail_idx != vq->avail_idx;
}

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	r = put_user(vq->used_flags, &vq->used->flags);
	if (r)
		vq_err(vq, "Failed to disable notification at %p: %d\n",
		       &vq->used->flags, r);
}
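
/* A rough sketch of how userspace drives this file, for orientation only
 * (argument types are in <linux/vhost.h>): open the vhost device, issue
 * VHOST_SET_OWNER to bind the fd to this process and start the worker,
 * VHOST_SET_MEM_TABLE to describe guest memory, then per virtqueue
 * VHOST_SET_VRING_NUM/BASE/ADDR followed by VHOST_SET_VRING_KICK and
 * VHOST_SET_VRING_CALL with eventfds, after which a backend such as
 * vhost-net can be started.  VHOST_SET_LOG_BASE and VHOST_SET_LOG_FD
 * enable dirty logging, e.g. for migration. */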