drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Number of free buffers */
        unsigned int num_free;
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

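/* Recover our vring_virtqueue wrapper from the struct virtqueue embedded in it. */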
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
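/* Illustrative layout (an editorial sketch, not part of the original
 * comments): for out = 2, in = 1 the table built below looks like
 *
 *      desc[0]: addr=sg[0], NEXT        -> desc[1]
 *      desc[1]: addr=sg[1], NEXT        -> desc[2]
 *      desc[2]: addr=sg[2], WRITE       (last entry: NEXT cleared)
 *
 * and the ring itself consumes only a single descriptor, whose INDIRECT
 * flag points at this kmalloc'ed table. */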
static int vring_add_indirect(struct vring_virtqueue *vq,
                              struct scatterlist sg[],
                              unsigned int out,
                              unsigned int in)
{
        struct vring_desc *desc;
        unsigned head;
        int i;

        desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
        if (!desc)
                return vq->vring.num;

        /* Transfer entries from the sg list into the indirect page */
        for (i = 0; i < out; i++) {
                desc[i].flags = VRING_DESC_F_NEXT;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }
        for (; i < (out + in); i++) {
                desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }

        /* Last one doesn't continue. */
        desc[i-1].flags &= ~VRING_DESC_F_NEXT;
        desc[i-1].next = 0;

        /* We're about to use a buffer */
        vq->num_free--;

        /* Use a single buffer which doesn't continue */
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);

        /* Update free pointer */
        vq->free_head = vq->vring.desc[head].next;

        return head;
}

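/* Expose a buffer to the other side: an sg list with "out" readable entries
 * followed by "in" writable ones.  "data" is the driver token handed back by
 * vring_get_buf() on completion.  Returns the remaining capacity, or -ENOSPC
 * if the ring is full. */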
static int vring_add_buf(struct virtqueue *_vq,
                         struct scatterlist sg[],
                         unsigned int out,
                         unsigned int in,
                         void *data)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i, avail, head, uninitialized_var(prev);

        START_USE(vq);

        BUG_ON(data == NULL);

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && (out + in) > 1 && vq->num_free) {
                head = vring_add_indirect(vq, sg, out, in);
                if (head != vq->vring.num)
                        goto add_head;
        }

        BUG_ON(out + in > vq->vring.num);
        BUG_ON(out + in == 0);

        if (vq->num_free < out + in) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         out + in, vq->num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->num_free -= out + in;

        head = vq->free_head;
        for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        for (; in; i = vq->vring.desc[i].next, in--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

add_head:
        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync).  FIXME: avoid modulus here? */
        avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
        vq->vring.avail->ring[avail] = head;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        /* If we're indirect, we can fit many (assuming not OOM). */
        if (vq->indirect)
                return vq->num_free ? vq->vring.num : 0;
        return vq->num_free;
}

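/* Publish every buffer added since the last kick, then notify the other
 * side unless it has asked not to be prodded. */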
static void vring_kick(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        START_USE(vq);
        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb();

        vq->vring.avail->idx += vq->num_added;
        vq->num_added = 0;

        /* Need to update avail index before checking if we should notify */
        virtio_mb();

        if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
                /* Prod other side to tell it about changes. */
                vq->notify(&vq->vq);

        END_USE(vq);
}

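/* Return the descriptor chain starting at "head" to the free list, freeing
 * any indirect table it points at and clearing the driver token. */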
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->num_free++;
}

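/* Has the other side consumed buffers we haven't collected yet? */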
static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

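/* Fetch the next completed buffer: stores the length written by the other
 * side in *len and returns the token passed to vring_add_buf(), or NULL if
 * nothing is ready (or the ring is broken). */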
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb();

        i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
        *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        END_USE(vq);
        return ret;
}

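/* Hint that we don't want used-buffer callbacks.  The flag is advisory and
 * unsynchronized, so a callback may still arrive after this returns. */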
static void vring_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

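/* Re-enable callbacks.  Returns false if more buffers were used in the
 * meantime, in which case the caller should poll rather than wait. */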
static bool vring_enable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        virtio_mb();
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}

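/* Reclaim a buffer the other side never consumed (useful during shutdown);
 * returns its token, or NULL once every buffer has been detached. */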
static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}

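/* Interrupt handler for transports to call: dispatches to the driver's
 * callback only if the other side actually produced new used buffers. */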
irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

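/* The operations virtio drivers reach through vq->vq_ops. */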
static struct virtqueue_ops vring_vq_ops = {
        .add_buf = vring_add_buf,
        .get_buf = vring_get_buf,
        .kick = vring_kick,
        .disable_cb = vring_disable_cb,
        .enable_cb = vring_enable_cb,
        .detach_unused_buf = vring_detach_unused_buf,
};

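/* Create a virtqueue over "pages", a contiguous, vring_align-aligned region
 * the caller already shares with the other side.  num must be a power of 2;
 * "notify" is the transport's kick hook, and "callback", if non-NULL, runs
 * when used buffers arrive. */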
struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.vq_ops = &vring_vq_ops;
        vq->vq.name = name;
        vq->notify = notify;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

        /* No callback? Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->num_free = num;
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
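
/* A minimal usage sketch for transport authors (illustrative only: my_notify,
 * my_cb and the page allocation are hypothetical, not part of this file):
 *
 *      void *pages = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 *                              get_order(vring_size(256, PAGE_SIZE)));
 *      struct virtqueue *vq = vring_new_virtqueue(256, PAGE_SIZE, vdev,
 *                              pages, my_notify, my_cb, "requests");
 *
 * A transport would then hand the physical address of "pages" to the other
 * side before drivers start calling vq->vq_ops->add_buf(). */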

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");