/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
};

static struct virtio_vsock *virtio_vsock_get(void)
{
	return the_virtio_vsock;
}

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock = virtio_vsock_get();

	return vsock->guest_cid;
}

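/* Worker that moves queued packets from send_pkt_list onto the tx virtqueue.
 * Packets that do not fit are put back on the list to be retried later.
 */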
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	vq = vsock->vqs[VSOCK_VQ_TX];

	/* Avoid unnecessary interrupts while we're processing the ring */
	virtqueue_disable_cb(vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			virtqueue_enable_cb(vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
				continue; /* retry now that we have more space */
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

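/* Queue a packet for transmission and kick the send worker.  Returns the
 * packet length, or -ENODEV if no device is present.
 */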
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	vsock = virtio_vsock_get();
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	return len;
}

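/* Fill the rx virtqueue with freshly allocated receive buffers.  Must be
 * called with rx_lock held.
 */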
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

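/* Reclaim and free packets that the device has finished transmitting, then
 * reschedule the send worker in case packets are waiting for tx space.
 */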
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);
	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

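/* Process received packets and hand them to the core transport.  Receive
 * buffers are refilled once fewer than half the maximum remain posted.
 */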
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_recv_pkt(pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

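/* Mark a connected socket as reset by the transport. */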
static void virtio_vsock_reset_sock(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
	release_sock(sk);
}

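/* Read our guest CID from the device configuration space. */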
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	u64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}

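/* Process events from the event virtqueue and return the buffers to it. */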
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);

	mutex_unlock(&vsock->event_lock);
}

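/* Virtqueue callbacks run in interrupt context, so defer the real work to
 * the workqueue.
 */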
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

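/* Transport operations handed to the vsock core; the implementations live in
 * the common virtio transport code.
 */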
static struct virtio_transport virtio_transport = {
	.transport = {
		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size = virtio_transport_set_buffer_size,
		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
		.get_buffer_size = virtio_transport_get_buffer_size,
		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};

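/* Set up virtqueues, register with the vsock core, and prime the rx and
 * event virtqueues with buffers.
 */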
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (the_virtio_vsock) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX,
					    vsock->vqs, callbacks, names);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	ret = vsock_core_init(&virtio_transport.transport);
	if (ret < 0)
		goto out_vqs;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	vdev->priv = vsock;
	the_virtio_vsock = vsock;
	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	mutex_unlock(&vsock->event_lock);

	mutex_unlock(&the_virtio_vsock_mutex);
	return 0;

out_vqs:
	vsock->vdev->config->del_vqs(vsock->vdev);
out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

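/* Tear down in the reverse order of probe: stop the device, free any buffers
 * still in flight, and unregister from the vsock core.
 */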
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	mutex_lock(&the_virtio_vsock_mutex);
	the_virtio_vsock = NULL;
	vsock_core_exit();
	mutex_unlock(&the_virtio_vsock_mutex);

	vdev->config->del_vqs(vdev);

	kfree(vsock);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;
	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);