/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	/* Chain the descriptors together via their next fields. */
	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;
	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	/* TODO: tune this threshold */
	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);
void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);
MODULE_LICENSE("GPL");