/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"
static int experimental_zcopytx;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN 3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN 2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS 1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN 0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
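/* Note: both "done" (2) and "failed" (3) satisfy VHOST_DMA_IS_DONE(),
 * so failed zerocopy buffers are recycled just like successful ones;
 * only in-progress (1) and unused (0) entries keep the used ring from
 * advancing in vhost_zerocopy_signal_used() below.
 */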
enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
        VHOST_NET_POLL_DISABLED = 0,
        VHOST_NET_POLL_STARTED = 1,
        VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Tells us whether we are polling a socket for TX.
         * We only do this when socket buffer fills up.
         * Protected by tx vq lock. */
        enum vhost_net_poll_state tx_poll_state;
        /* Number of TX recently submitted.
         * Protected by tx vq lock. */
        unsigned tx_packets;
        /* Number of times zerocopy TX recently failed.
         * Protected by tx vq lock. */
        unsigned tx_zcopy_err;
        /* Flush in progress. Protected by tx vq lock. */
        bool tx_flush;
};

static void vhost_net_tx_packet(struct vhost_net *net)
{
        ++net->tx_packets;
        if (net->tx_packets < 1024)
                return;
        net->tx_packets = 0;
        net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
        ++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
        /* TX flush waits for outstanding DMAs to be done.
         * Don't start new DMAs.
         */
        return !net->tx_flush &&
               net->tx_packets / 64 >= net->tx_zcopy_err;
}
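/* A rough sketch of the heuristic above: tx_packets wraps every 1024
 * packets (see vhost_net_tx_packet()), so zerocopy stays selected while
 * no more than about 1 in 64 recent packets failed; e.g. after 256
 * packets, up to 256 / 64 = 4 recorded errors are tolerated before we
 * fall back to copying.
 */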

static bool vhost_sock_zcopy(struct socket *sock)
{
        return unlikely(experimental_zcopytx) &&
                sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
/* Copy iovec entries covering the first len bytes from one iovec to
 * another, without consuming them. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
                           size_t len, int iovcount)
{
        int seg = 0;
        size_t size;

        while (len && seg < iovcount) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
}

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
        if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
                return;
        vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
        net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
                return;
        vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
        net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* The lower device driver may complete DMAs out of order. upend_idx
 * tracks the next slot to submit (the tail), done_idx tracks the next
 * slot to reclaim (the head). Once the lower device has completed a
 * contiguous run of DMAs from the head, we signal the used idx to the
 * guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
                                      struct vhost_virtqueue *vq)
{
        int i;
        int j = 0;

        for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
                if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
                        vhost_net_tx_err(net);
                if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
                        vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
                        vhost_add_used_and_signal(vq->dev, vq,
                                                  vq->heads[i].id, 0);
                        ++j;
                } else
                        break;
        }
        if (j)
                vq->done_idx = i;
        return j;
}
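/* Worked example (illustrative): with done_idx == UIO_MAXIOV - 1 and
 * upend_idx == 2, entries UIO_MAXIOV - 1, 0 and 1 are outstanding; if
 * only entry 0 has completed, nothing is reclaimed yet, because the scan
 * stops at the still-in-flight head entry.
 */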

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
        struct vhost_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
        int cnt = atomic_read(&ubufs->kref.refcount);

        /*
         * Trigger polling thread if guest stopped submitting new buffers:
         * in this case, the refcount after decrement will eventually reach 1
         * so here it is 2.
         * We also trigger polling periodically after each 16 packets
         * (the value 16 here is more or less arbitrary, it's tuned to trigger
         * less than 10% of times).
         */
        if (cnt <= 2 || !(cnt % 16))
                vhost_poll_queue(&vq->poll);
        /* Set len to mark this descriptor's buffers as DMA done */
        vq->heads[ubuf->desc].len = success ?
                VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
        vhost_ubuf_put(ubufs);
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
        unsigned out, in, s;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err, wmem;
        size_t hdr_size;
        struct socket *sock;
        struct vhost_ubuf_ref *uninitialized_var(ubufs);
        bool zcopy, zcopy_used;

        /* TODO: check that we are running from vhost_worker? */
        sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock)
                return;

        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
        if (wmem >= sock->sk->sk_sndbuf) {
                mutex_lock(&vq->mutex);
                tx_poll_start(net, sock);
                mutex_unlock(&vq->mutex);
                return;
        }

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);

        if (wmem < sock->sk->sk_sndbuf / 2)
                tx_poll_stop(net);
        hdr_size = vq->vhost_hlen;
        zcopy = vq->ubufs;

        for (;;) {
                /* Release completed-DMA buffers first */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);

                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new? Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        int num_pends;

                        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
                        if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        /* If there are too many outstanding DMAs, stop
                         * and wait for them to complete; the count below
                         * handles upend_idx wraparound.
                         */
                        num_pends = likely(vq->upend_idx >= vq->done_idx) ?
                                    (vq->upend_idx - vq->done_idx) :
                                    (vq->upend_idx + UIO_MAXIOV -
                                     vq->done_idx);
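                        /* E.g. upend_idx == 3 and done_idx == UIO_MAXIOV - 2
                         * means the submit cursor has wrapped past the end of
                         * heads[], giving 5 DMAs still in flight.
                         */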
                        if (unlikely(num_pends > VHOST_MAX_PEND)) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
                                       vq->upend_idx != vq->done_idx);

                /* Use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
                        vq->heads[vq->upend_idx].id = head;
                        if (!vhost_net_tx_select_zcopy(net) ||
                            len < VHOST_GOODCOPY_LEN) {
                                /* A copy doesn't need to wait for DMA done */
                                vq->heads[vq->upend_idx].len =
                                        VHOST_DMA_DONE_LEN;
                                msg.msg_control = NULL;
                                msg.msg_controllen = 0;
                                ubufs = NULL;
                        } else {
                                struct ubuf_info *ubuf = &vq->ubuf_info[head];

                                vq->heads[vq->upend_idx].len =
                                        VHOST_DMA_IN_PROGRESS;
                                ubuf->callback = vhost_zerocopy_callback;
                                ubuf->ctx = vq->ubufs;
                                ubuf->desc = vq->upend_idx;
                                msg.msg_control = ubuf;
                                msg.msg_controllen = sizeof(ubuf);
                                ubufs = vq->ubufs;
                                kref_get(&ubufs->kref);
                        }
                        vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
                                if (ubufs)
                                        vhost_ubuf_put(ubufs);
                                vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
                                        UIO_MAXIOV;
                        }
                        vhost_discard_vq_desc(vq, 1);
                        if (err == -EAGAIN || err == -ENOBUFS)
                                tx_poll_start(net, sock);
                        break;
                }
                if (err != len)
                        pr_debug("Truncated TX packet: "
                                 " len %d != %zd\n", err, len);
                if (!zcopy_used)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
                total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
        struct sk_buff *head;
        int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
                if (vlan_tx_tag_present(head))
                        len += VLAN_HLEN;
        }

        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
        return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc, which works if
 * vq has read descriptors only.
 * @vq - the relevant virtqueue
 * @datalen - data length we'll be reading
 * @iovcount - returned count of io vectors we fill
 * @log - vhost log
 * @log_num - returned count of log entries
 * @quota - headcount quota, 1 for big buffer
 * returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
                       struct vring_used_elem *heads,
                       int datalen,
                       unsigned *iovcount,
                       struct vhost_log *log,
                       unsigned *log_num,
                       unsigned int quota)
{
        unsigned int out, in;
        int seg = 0;
        int headcount = 0;
        unsigned d;
        int r, nlogs = 0;

        while (datalen > 0 && headcount < quota) {
                if (unlikely(seg >= UIO_MAXIOV)) {
                        r = -ENOBUFS;
                        goto err;
                }
                d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
                if (d == vq->num) {
                        r = 0;
                        goto err;
                }
                if (unlikely(out || in <= 0)) {
                        vq_err(vq, "unexpected descriptor format for RX: "
                               "out %d, in %d\n", out, in);
                        r = -EINVAL;
                        goto err;
                }
                if (unlikely(log)) {
                        nlogs += *log_num;
                        log += *log_num;
                }
                heads[headcount].id = d;
                heads[headcount].len = iov_length(vq->iov + seg, in);
                datalen -= heads[headcount].len;
                ++headcount;
                seg += in;
        }
        heads[headcount - 1].len += datalen;
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
        return headcount;
err:
        vhost_discard_vq_desc(vq, headcount);
        return r;
}
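/* Note how the final head is trimmed above: datalen goes negative by
 * however much the last buffer overshoots, and adding it back caps the
 * reported length at the true data size. E.g. datalen = 1500 with two
 * 1000-byte buffers ends the loop at datalen = -500, so heads[1].len
 * becomes 1000 + (-500) = 500 and the heads cover exactly 1500 bytes.
 */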

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
        unsigned uninitialized_var(in), log;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        struct virtio_net_hdr_mrg_rxbuf hdr = {
                .hdr.flags = 0,
                .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
        s16 headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        /* TODO: check that we are running from vhost_worker? */
        struct socket *sock = rcu_dereference_check(vq->private_data, 1);

        if (!sock)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);
        vhost_hlen = vq->vhost_hlen;
        sock_hlen = vq->sock_hlen;

        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;
        mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

        while ((sock_len = peek_head_len(sock->sk))) {
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads, vhost_len,
                                        &in, vq_log, &log,
                                        likely(mergeable) ? UIO_MAXIOV : 1);
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        break;
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        /* Nothing new? Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (unlikely(vhost_hlen))
                        /* Skip header. TODO: support TSO. */
                        move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
                else
                        /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
                         * needed because recvmsg can modify msg_iov. */
                        copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
                msg.msg_iovlen = in;
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
                 * it's not supposed to do this usually, but might be hard
                 * to prevent. Discard data we got (if any) and keep going. */
                if (unlikely(err != sock_len)) {
                        pr_debug("Discarded rx packet: "
                                 " len %d, expected %zd\n", err, sock_len);
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
                if (unlikely(vhost_hlen) &&
                    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
                                      vhost_hlen)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (likely(mergeable) &&
                    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
                }
                vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                            headcount);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, vhost_len);
                total_len += vhost_len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
        int r;

        if (!n)
                return -ENOMEM;

        dev = &n->dev;
        n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
        r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
        n->tx_poll_state = VHOST_NET_POLL_DISABLED;

        f->private_data = n;

        return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        if (!vq->private_data)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                tx_poll_stop(n);
                n->tx_poll_state = VHOST_NET_POLL_DISABLED;
        } else
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
{
        struct socket *sock;

        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
                tx_poll_start(n, sock);
        } else
                vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
        *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
        if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
                mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                n->tx_flush = true;
                mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                /* Wait for all lower device DMAs done. */
                vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
                mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                n->tx_flush = false;
                kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
                mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
        }
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev, false);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n);
        return 0;
}

static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        fput(sock->file);
        return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (!IS_ERR(sock))
                return sock;
        sock = macvtap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tap_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = n->vqs + index;
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = rcu_dereference_protected(vq->private_data,
                                            lockdep_is_held(&vq->mutex));
        if (sock != oldsock) {
                ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
                if (IS_ERR(ubufs)) {
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }
                oldubufs = vq->ubufs;
                vq->ubufs = ubufs;
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
                vhost_net_enable_vq(n, vq);

                r = vhost_init_used(vq);
                if (r)
                        goto err_vq;

                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
                n->tx_flush = false;
        }

        mutex_unlock(&vq->mutex);

        if (oldubufs) {
                vhost_ubuf_put_and_wait(oldubufs);
                mutex_lock(&vq->mutex);
                vhost_zerocopy_signal_used(n, vq);
                mutex_unlock(&vq->mutex);
        }

        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err_ubufs:
        fput(sock->file);
err_vq:
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        err = vhost_dev_reset_owner(&n->dev);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t vhost_hlen, sock_hlen, hdr_len;
        int i;

        hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
                        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
                        sizeof(struct virtio_net_hdr);
        if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
                /* vhost provides vnet_hdr */
                vhost_hlen = hdr_len;
                sock_hlen = 0;
        } else {
                /* socket provides vnet_hdr */
                vhost_hlen = 0;
                sock_hlen = hdr_len;
        }
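        /* For example, with both VIRTIO_NET_F_MRG_RXBUF and
         * VHOST_NET_F_VIRTIO_NET_HDR acked, vhost itself prepends the
         * 12-byte virtio_net_hdr_mrg_rxbuf and expects none from the
         * socket (vhost_hlen = 12, sock_hlen = 0); without
         * VHOST_NET_F_VIRTIO_NET_HDR, the tap/macvtap socket supplies
         * the header instead.
         */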
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].mutex);
                n->vqs[i].vhost_hlen = vhost_hlen;
                n->vqs[i].sock_hlen = sock_hlen;
                mutex_unlock(&n->vqs[i].mutex);
        }
        vhost_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_NET_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_NET_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                else
                        vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner = THIS_MODULE,
        .release = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = vhost_net_compat_ioctl,
#endif
        .open = vhost_net_open,
        .llseek = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
        .minor = VHOST_NET_MINOR,
        .name = "vhost-net",
        .fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
        if (experimental_zcopytx)
                vhost_enable_zcopy(VHOST_NET_VQ_TX);
        return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");