/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
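
/*
 * Example (illustrative only, not part of this file): a driver queueing
 * a request with a device-readable header and a device-writable status
 * byte.  "req" and its members are hypothetical:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */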

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
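
/*
 * Example (illustrative only): refilling a receive queue with a buffer
 * for the device to write into; "buf" is a hypothetical driver
 * allocation of PAGE_SIZE bytes:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, PAGE_SIZE);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 */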

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
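
/*
 * Example (illustrative only): the split kick lets a driver drop its
 * lock before the potentially expensive host notification.  "priv" and
 * its lock are hypothetical:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */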

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
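
/*
 * Example (illustrative only): a completion path typically drains all
 * used buffers in a loop; my_complete_request() is hypothetical:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		my_complete_request(buf, len);
 */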

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
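
/*
 * Example (illustrative only): re-enabling callbacks without missing a
 * buffer that arrives in between; my_complete_request() and the locals
 * are hypothetical:
 *
 *	unsigned opaque;
 *
 * again:
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		my_complete_request(buf, len);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto again;
 *	}
 */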

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
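
/*
 * Example (illustrative only): a transmit path that reclaims completed
 * buffers lazily can use the delayed variant to batch interrupts;
 * my_reclaim_tx() is hypothetical:
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		my_reclaim_tx(vq);	(many buffers are already used)
 */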

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
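
/*
 * Example (illustrative only): freeing leftover buffers on device
 * removal, after the device has been reset so the ring is quiescent:
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */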

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
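
/*
 * Example (illustrative only): a transport typically wires this handler
 * to the queue's interrupt line, e.g.:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 */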

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
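
/*
 * Example (illustrative only): a transport allocating ring memory and
 * instantiating a queue.  "num", "my_notify" and "my_callback" are
 * hypothetical:
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				get_order(vring_size(num, PAGE_SIZE)));
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
 *						   true, pages, my_notify,
 *						   my_callback, "requestq");
 */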

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
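
/*
 * Example (illustrative only): a transport's finalize_features hook is
 * the usual caller, stripping ring feature bits it cannot support:
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */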

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");