/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

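/* These are read-only module parameters (mode 0444): they appear under
 * /sys/module/virtio_net/parameters/ but can only be changed at module
 * load time, e.g. "modprobe virtio_net csum=0 gso=0" (an illustrative
 * invocation, not taken from this file).
 */
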
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
                                sizeof(struct virtio_net_hdr_mrg_rxbuf), \
                                L1_CACHE_BYTES))
#define GOOD_COPY_LEN	128

#define VIRTNET_DRIVER_VERSION "1.0.0"

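/* A worked example, assuming 64-byte L1_CACHE_BYTES: GOOD_PACKET_LEN is
 * 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 (ETH_DATA_LEN) = 1518 bytes, and
 * sizeof(struct virtio_net_hdr_mrg_rxbuf) is 12, so MERGE_BUFFER_LEN
 * comes to ALIGN(1530, 64) = 1536 bytes per mergeable receive buffer.
 */
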
struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
	 * low on memory.
	 */
	struct page_frag alloc_frag;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16-byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

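/* sizeof(struct virtio_net_hdr) is 10 bytes, so the 6 bytes of padding
 * above round the header portion up to 16 bytes, which is what keeps the
 * data sg entry that follows it 16-byte aligned.
 */
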
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

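/* For example, with two queue pairs the virtqueues are laid out as
 * vq0:rx0 vq1:tx0 vq2:rx1 vq3:tx1 vq4:cvq, so vq2txq() maps vq3 to
 * (3 - 1) / 2 == txq 1, and rxq2vq(1) maps back to vq 2.
 */
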
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

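/* The TX completion handler above only wakes the queue; the completed
 * skbs themselves are reaped later by free_old_xmit_skbs() from the
 * transmit path.
 */
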
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		hdr_padded_len = sizeof hdr->mhdr;
	} else {
		hdr_len = sizeof hdr->hdr;
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

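/* With the copy/frag split above, packets up to GOOD_COPY_LEN (128)
 * bytes end up entirely in the skb linear area, while longer packets
 * keep the remainder in page fragments: one small memcpy is traded for
 * being able to reuse the receive pages for small packets.
 */
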
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
	struct sk_buff *curr_skb = head_skb;
	char *buf;
	struct page *page;
	int num_buf, len, offset;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 head_skb->dev->name, hdr->mhdr.num_buffers);
			head_skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		if (unlikely(len > MERGE_BUFFER_LEN)) {
			pr_debug("%s: rx error: merge buffer too long\n",
				 head_skb->dev->name);
			len = MERGE_BUFFER_LEN;
		}
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
			if (unlikely(!nskb)) {
				head_skb->dev->stats.rx_dropped++;
				return -ENOMEM;
			}
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += MERGE_BUFFER_LEN;
		}
		page = virt_to_head_page(buf);
		offset = buf - (char *)page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, MERGE_BUFFER_LEN);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, MERGE_BUFFER_LEN);
		}
		--rq->num;
	}
	return 0;
}

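/* hdr->mhdr.num_buffers counts all descriptors making up one merged
 * packet, including the first buffer the caller already consumed, which
 * is why the loop above iterates while (--num_buf) rather than over the
 * full count.
 */
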
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->big_packets)
			give_pages(rq, buf);
		else if (vi->mergeable_rx_bufs)
			put_page(virt_to_head_page(buf));
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else if (vi->mergeable_rx_bufs) {
		struct page *page = virt_to_head_page(buf);
		skb = page_to_skb(rq, page,
				  (char *)buf - (char *)page_address(page),
				  len, MERGE_BUFFER_LEN);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			put_page(page);
			return;
		}
		if (receive_mergeable(rq, skb)) {
			dev_kfree_skb(skb);
			return;
		}
	} else {
		page = buf;
		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	char *buf = NULL;
	int err;

	if (gfp & __GFP_WAIT) {
		if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
					 gfp)) {
			buf = (char *)page_address(vi->alloc_frag.page) +
			      vi->alloc_frag.offset;
			get_page(vi->alloc_frag.page);
			vi->alloc_frag.offset += MERGE_BUFFER_LEN;
		}
	} else {
		buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
	}
	if (!buf)
		return -ENOMEM;

	sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	if (unlikely(!virtqueue_kick(rq->vq)))
		return false;
	return !oom;
}

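/* Note that the queue is kicked once for the whole refill batch rather
 * than once per buffer, so the host is notified only after the ring has
 * been topped up as far as it will go.
 */
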
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int r, len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

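/* The virtqueue_enable_cb_prepare()/virtqueue_poll() pair above closes
 * the race where a buffer arrives between the last virtqueue_get_buf()
 * and interrupt re-enabling: if virtqueue_poll() sees new buffers,
 * polling restarts instead of waiting for an interrupt that may never
 * arrive.
 */
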
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
	if (vi->mergeable_rx_bufs)
		hdr_len = sizeof hdr->mhdr;
	else
		hdr_len = sizeof hdr->hdr;

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	if (vi->mergeable_rx_bufs)
		hdr->mhdr.num_buffers = 0;

	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

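/* 2 + MAX_SKB_FRAGS is the worst-case descriptor count for a single skb
 * (virtio header + linear part + every fragment), so stopping the queue
 * below that threshold is what keeps the next xmit_skb() from failing
 * for lack of ring space in the common case.
 */
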
/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out,
				 struct scatterlist *in)
{
	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned out_num = 0, in_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	ctrl.class = class;
	ctrl.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;
	if (in)
		sgs[out_num + in_num++] = in;

	/* Add return status. */
	sg_init_one(&stat, &status, sizeof(status));
	sgs[out_num + in_num++] = &stat;

	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
	       < 0);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

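/* sgs[4] above is sized for the largest command shape: one header
 * entry, at most one "out" payload, at most one "in" payload, and one
 * status entry; the BUG_ON() on out_num + in_num enforces that bound.
 */
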
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, NULL)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device goes up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of CPUs equals the number of
	 * queue pairs, we make each queue pair private to one CPU by
	 * setting the affinity hint, eliminating the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else if (vi->mergeable_rx_bufs)
				put_page(virt_to_head_page(buf));
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

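/* The callbacks[] and names[] indices above come from rxq2vq()/txq2vq(),
 * so the ordering handed to find_vqs() matches the rx0/tx0/rx1/tx1/.../cvq
 * layout that the converters near the top of the file assume.
 */
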
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

1485 | static int init_vqs(struct virtnet_info *vi) | |
1486 | { | |
1487 | int ret; | |
1488 | ||
1489 | /* Allocate send & receive queues */ | |
1490 | ret = virtnet_alloc_queues(vi); | |
1491 | if (ret) | |
1492 | goto err; | |
1493 | ||
1494 | ret = virtnet_find_vqs(vi); | |
1495 | if (ret) | |
1496 | goto err_free; | |
1497 | ||
47be2479 | 1498 | get_online_cpus(); |
8898c21c | 1499 | virtnet_set_affinity(vi); |
47be2479 WG |
1500 | put_online_cpus(); |
1501 | ||
986a4f4d JW |
1502 | return 0; |
1503 | ||
1504 | err_free: | |
1505 | virtnet_free_queues(vi); | |
1506 | err: | |
1507 | return ret; | |
3f9c10b0 AS |
1508 | } |
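/*
 * Note: get_online_cpus()/put_online_cpus() hold the CPU-hotplug read
 * lock, so the set of online CPUs cannot change while
 * virtnet_set_affinity() spreads queue interrupts across them; later
 * hotplug events are handled by the notifier registered in
 * virtnet_probe() below.
 */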
1509 | ||
296f96fc RR |
1510 | static int virtnet_probe(struct virtio_device *vdev) |
1511 | { | |
986a4f4d | 1512 | int i, err; |
296f96fc RR |
1513 | struct net_device *dev; |
1514 | struct virtnet_info *vi; | |
986a4f4d JW |
1515 | u16 max_queue_pairs; |
1516 | ||
1517 | /* Find out whether the host supports a multiqueue virtio_net device */ | 
855e0c52 RR |
1518 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
1519 | struct virtio_net_config, | |
1520 | max_virtqueue_pairs, &max_queue_pairs); | |
986a4f4d JW |
1521 | |
1522 | /* We need at least 2 queues (one RX/TX pair) */ | 
1523 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || | |
1524 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || | |
1525 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | |
1526 | max_queue_pairs = 1; | |
296f96fc RR |
1527 | |
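/*
 * Sketch of what the virtio_cread_feature() call above expands to
 * (assuming the helper's definition in <linux/virtio_config.h>):
 *
 *	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *		err = -ENOENT;
 *	else
 *		virtio_cread(vdev, struct virtio_net_config,
 *		             max_virtqueue_pairs, &max_queue_pairs);
 *
 * A host without VIRTIO_NET_F_MQ never has this config field read, and
 * the clamp above falls back to a single queue pair.
 */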
1528 | /* Allocate ourselves a network device with room for our info */ | |
986a4f4d | 1529 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
296f96fc RR |
1530 | if (!dev) |
1531 | return -ENOMEM; | |
1532 | ||
1533 | /* Set up network device as normal. */ | |
f2f2c8b4 | 1534 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
76288b4e | 1535 | dev->netdev_ops = &virtnet_netdev; |
296f96fc | 1536 | dev->features = NETIF_F_HIGHDMA; |
3fa2a1df | 1537 | |
a9ea3fc6 | 1538 | SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); |
296f96fc RR |
1539 | SET_NETDEV_DEV(dev, &vdev->dev); |
1540 | ||
1541 | /* Do we support "hardware" checksums? */ | |
98e778c9 | 1542 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
296f96fc | 1543 | /* This opens up the world of extra features. */ |
98e778c9 MM |
1544 | dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
1545 | if (csum) | |
1546 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; | |
1547 | ||
1548 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { | |
1549 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | |
34a48579 RR |
1550 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
1551 | } | |
5539ae96 | 1552 | /* Individual feature bits: what can the host handle? */ | 
98e778c9 MM |
1553 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
1554 | dev->hw_features |= NETIF_F_TSO; | |
1555 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) | |
1556 | dev->hw_features |= NETIF_F_TSO6; | |
1557 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) | |
1558 | dev->hw_features |= NETIF_F_TSO_ECN; | |
1559 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) | |
1560 | dev->hw_features |= NETIF_F_UFO; | |
1561 | ||
1562 | if (gso) | |
1563 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); | |
1564 | /* (!csum && gso) case will be fixed by register_netdev() */ | |
296f96fc | 1565 | } |
4f49129b TH |
1566 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
1567 | dev->features |= NETIF_F_RXCSUM; | |
296f96fc | 1568 | |
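/*
 * Example (the interface name and commands are hypothetical): a feature
 * present in dev->hw_features but absent from dev->features starts out
 * disabled yet remains user-toggleable:
 *
 *	modprobe virtio_net csum=0	# NETIF_F_HW_CSUM not enabled ...
 *	ethtool -K eth0 tx on		# ... but can be switched on later
 *
 * dev->features holds what is currently active; dev->hw_features holds
 * what ethtool may change.
 */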
4fda8302 JW |
1569 | dev->vlan_features = dev->features; |
1570 | ||
296f96fc | 1571 | /* Configuration may specify what MAC to use. Otherwise random. */ |
855e0c52 RR |
1572 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
1573 | virtio_cread_bytes(vdev, | |
1574 | offsetof(struct virtio_net_config, mac), | |
1575 | dev->dev_addr, dev->addr_len); | |
1576 | else | |
f2cedb63 | 1577 | eth_hw_addr_random(dev); |
296f96fc RR |
1578 | |
1579 | /* Set up our device-specific information */ | |
1580 | vi = netdev_priv(dev); | |
296f96fc RR |
1581 | vi->dev = dev; |
1582 | vi->vdev = vdev; | |
d9d5dcc8 | 1583 | vdev->priv = vi; |
3fa2a1df | 1584 | vi->stats = alloc_percpu(struct virtnet_stats); |
1585 | err = -ENOMEM; | |
1586 | if (vi->stats == NULL) | |
1587 | goto free; | |
1588 | ||
827da44c JS |
1589 | for_each_possible_cpu(i) { |
1590 | struct virtnet_stats *virtnet_stats; | |
1591 | virtnet_stats = per_cpu_ptr(vi->stats, i); | |
1592 | u64_stats_init(&virtnet_stats->tx_syncp); | |
1593 | u64_stats_init(&virtnet_stats->rx_syncp); | |
1594 | } | |
1595 | ||
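/*
 * Note: u64_stats_init() seeds the seqcount that u64_stats_sync uses on
 * 32-bit SMP kernels, where readers need it to observe consistent 64-bit
 * counters; on 64-bit kernels the sync objects compile away to nothing.
 */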
586d17c5 JW |
1596 | mutex_init(&vi->config_lock); |
1597 | vi->config_enable = true; | |
1598 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); | |
296f96fc | 1599 | |
97402b96 | 1600 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
8e95a202 JP |
1601 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
1602 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
1603 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) | |
97402b96 HX |
1604 | vi->big_packets = true; |
1605 | ||
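/*
 * Note: "large ones" because a single GSO packet from the host may be up
 * to 64KB, far beyond GOOD_PACKET_LEN, so the receive buffers must be
 * chained pages rather than MTU-sized allocations.
 */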
3f2c31d9 MM |
1606 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
1607 | vi->mergeable_rx_bufs = true; | |
1608 | ||
e7428e95 MT |
1609 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) |
1610 | vi->any_header_sg = true; | |
1611 | ||
986a4f4d JW |
1612 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
1613 | vi->has_cvq = true; | |
1614 | ||
1615 | /* Use single tx/rx queue pair as default */ | |
1616 | vi->curr_queue_pairs = 1; | |
1617 | vi->max_queue_pairs = max_queue_pairs; | |
1618 | ||
1619 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ | |
3f9c10b0 | 1620 | err = init_vqs(vi); |
d2a7ddda | 1621 | if (err) |
9bb8ca86 | 1622 | goto free_stats; |
296f96fc | 1623 | |
0f13b66b ZYW |
1624 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
1625 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); | |
986a4f4d | 1626 | |
296f96fc RR |
1627 | err = register_netdev(dev); |
1628 | if (err) { | |
1629 | pr_debug("virtio_net: registering device failed\n"); | |
d2a7ddda | 1630 | goto free_vqs; |
296f96fc | 1631 | } |
b3369c1f RR |
1632 | |
1633 | /* Last of all, set up some receive buffers. */ | |
55257d72 | 1634 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
986a4f4d JW |
1635 | try_fill_recv(&vi->rq[i], GFP_KERNEL); |
1636 | ||
1637 | /* If we didn't even get one input buffer, we're useless. */ | |
1638 | if (vi->rq[i].num == 0) { | |
1639 | free_unused_bufs(vi); | |
1640 | err = -ENOMEM; | |
1641 | goto free_recv_bufs; | |
1642 | } | |
b3369c1f RR |
1643 | } |
1644 | ||
8de4b2f3 WG |
1645 | vi->nb.notifier_call = &virtnet_cpu_callback; |
1646 | err = register_hotcpu_notifier(&vi->nb); | |
1647 | if (err) { | |
1648 | pr_debug("virtio_net: registering cpu notifier failed\n"); | |
1649 | goto free_recv_bufs; | |
1650 | } | |
1651 | ||
167c25e4 JW |
1652 | /* Assume link up if the device can't report link status; | 
1653 | * otherwise get the link status from config. */ | 
1654 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { | |
1655 | netif_carrier_off(dev); | |
3b07e9ca | 1656 | schedule_work(&vi->config_work); |
167c25e4 JW |
1657 | } else { |
1658 | vi->status = VIRTIO_NET_S_LINK_UP; | |
1659 | netif_carrier_on(dev); | |
1660 | } | |
9f4d26d0 | 1661 | |
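/*
 * Note: with VIRTIO_NET_F_STATUS the carrier starts off and the scheduled
 * work reads the real link state from config space, reusing the same
 * virtnet_config_changed_work() handler that services later status
 * updates; without the feature there is nothing to read, so link-up is
 * assumed.
 */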
986a4f4d JW |
1662 | pr_debug("virtnet: registered device %s with %d RX and TX vqs\n", | 
1663 | dev->name, max_queue_pairs); | |
1664 | ||
296f96fc RR |
1665 | return 0; |
1666 | ||
986a4f4d JW |
1667 | free_recv_bufs: |
1668 | free_receive_bufs(vi); | |
b3369c1f | 1669 | unregister_netdev(dev); |
d2a7ddda | 1670 | free_vqs: |
986a4f4d | 1671 | cancel_delayed_work_sync(&vi->refill); |
e9d7417b | 1672 | virtnet_del_vqs(vi); |
2613af0e MD |
1673 | if (vi->alloc_frag.page) |
1674 | put_page(vi->alloc_frag.page); | |
3fa2a1df | 1675 | free_stats: |
1676 | free_percpu(vi->stats); | |
296f96fc RR |
1677 | free: |
1678 | free_netdev(dev); | |
1679 | return err; | |
1680 | } | |
1681 | ||
04486ed0 | 1682 | static void remove_vq_common(struct virtnet_info *vi) |
296f96fc | 1683 | { |
04486ed0 | 1684 | vi->vdev->config->reset(vi->vdev); |
830a8a97 SM |
1685 | |
1686 | /* Free unused buffers in both send and recv, if any. */ | |
9ab86bbc | 1687 | free_unused_bufs(vi); |
fb6813f4 | 1688 | |
986a4f4d | 1689 | free_receive_bufs(vi); |
d2a7ddda | 1690 | |
986a4f4d | 1691 | virtnet_del_vqs(vi); |
04486ed0 AS |
1692 | } |
1693 | ||
8cc085d6 | 1694 | static void virtnet_remove(struct virtio_device *vdev) |
04486ed0 AS |
1695 | { |
1696 | struct virtnet_info *vi = vdev->priv; | |
1697 | ||
8de4b2f3 WG |
1698 | unregister_hotcpu_notifier(&vi->nb); |
1699 | ||
586d17c5 JW |
1700 | /* Prevent config work handler from accessing the device. */ |
1701 | mutex_lock(&vi->config_lock); | |
1702 | vi->config_enable = false; | |
1703 | mutex_unlock(&vi->config_lock); | |
1704 | ||
04486ed0 AS |
1705 | unregister_netdev(vi->dev); |
1706 | ||
1707 | remove_vq_common(vi); | |
2613af0e MD |
1708 | if (vi->alloc_frag.page) |
1709 | put_page(vi->alloc_frag.page); | |
fb6813f4 | 1710 | |
586d17c5 JW |
1711 | flush_work(&vi->config_work); |
1712 | ||
2e66f55b | 1713 | free_percpu(vi->stats); |
74b2553f | 1714 | free_netdev(vi->dev); |
296f96fc RR |
1715 | } |
1716 | ||
89107000 | 1717 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
1718 | static int virtnet_freeze(struct virtio_device *vdev) |
1719 | { | |
1720 | struct virtnet_info *vi = vdev->priv; | |
986a4f4d | 1721 | int i; |
0741bcb5 | 1722 | |
ec9debbd JW |
1723 | unregister_hotcpu_notifier(&vi->nb); |
1724 | ||
586d17c5 JW |
1725 | /* Prevent config work handler from accessing the device */ |
1726 | mutex_lock(&vi->config_lock); | |
1727 | vi->config_enable = false; | |
1728 | mutex_unlock(&vi->config_lock); | |
1729 | ||
0741bcb5 AS |
1730 | netif_device_detach(vi->dev); |
1731 | cancel_delayed_work_sync(&vi->refill); | |
1732 | ||
1733 | if (netif_running(vi->dev)) | |
986a4f4d JW |
1734 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1735 | napi_disable(&vi->rq[i].napi); | |
1736 | netif_napi_del(&vi->rq[i].napi); | |
1737 | } | |
0741bcb5 AS |
1738 | |
1739 | remove_vq_common(vi); | |
1740 | ||
586d17c5 JW |
1741 | flush_work(&vi->config_work); |
1742 | ||
0741bcb5 AS |
1743 | return 0; |
1744 | } | |
1745 | ||
1746 | static int virtnet_restore(struct virtio_device *vdev) | |
1747 | { | |
1748 | struct virtnet_info *vi = vdev->priv; | |
986a4f4d | 1749 | int err, i; |
0741bcb5 AS |
1750 | |
1751 | err = init_vqs(vi); | |
1752 | if (err) | |
1753 | return err; | |
1754 | ||
1755 | if (netif_running(vi->dev)) | |
986a4f4d JW |
1756 | for (i = 0; i < vi->max_queue_pairs; i++) |
1757 | virtnet_napi_enable(&vi->rq[i]); | |
0741bcb5 AS |
1758 | |
1759 | netif_device_attach(vi->dev); | |
1760 | ||
55257d72 | 1761 | for (i = 0; i < vi->curr_queue_pairs; i++) |
986a4f4d JW |
1762 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) |
1763 | schedule_delayed_work(&vi->refill, 0); | |
0741bcb5 | 1764 | |
586d17c5 JW |
1765 | mutex_lock(&vi->config_lock); |
1766 | vi->config_enable = true; | |
1767 | mutex_unlock(&vi->config_lock); | |
1768 | ||
35ed159b | 1769 | rtnl_lock(); |
986a4f4d | 1770 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
35ed159b | 1771 | rtnl_unlock(); |
986a4f4d | 1772 | |
ec9debbd JW |
1773 | err = register_hotcpu_notifier(&vi->nb); |
1774 | if (err) | |
1775 | return err; | |
1776 | ||
0741bcb5 AS |
1777 | return 0; |
1778 | } | |
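/*
 * Note: virtnet_restore() undoes virtnet_freeze() step by step: the
 * virtqueues are recreated, NAPI re-enabled, the receive rings refilled,
 * config processing re-armed, and virtnet_set_queues() runs under
 * rtnl_lock (it sends a control-vq command and updates the real queue
 * counts seen by the stack) to restore the pre-suspend queue pairs.
 */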
1779 | #endif | |
1780 | ||
296f96fc RR |
1781 | static struct virtio_device_id id_table[] = { |
1782 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, | |
1783 | { 0 }, | |
1784 | }; | |
1785 | ||
c45a6816 | 1786 | static unsigned int features[] = { |
5e4fe5c4 MM |
1787 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
1788 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | |
c45a6816 | 1789 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
97402b96 | 1790 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
5c516751 | 1791 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
2a41f71d | 1792 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
0bde9569 | 1793 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
986a4f4d | 1794 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, |
7e58d5ae | 1795 | VIRTIO_NET_F_CTRL_MAC_ADDR, |
e7428e95 | 1796 | VIRTIO_F_ANY_LAYOUT, |
c45a6816 RR |
1797 | }; |
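/*
 * Note: during feature negotiation the virtio core accepts only the bits
 * listed in this table that the device also offers, which is what the
 * virtio_has_feature() checks in virtnet_probe() rely on.
 */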
1798 | ||
22402529 | 1799 | static struct virtio_driver virtio_net_driver = { |
c45a6816 RR |
1800 | .feature_table = features, |
1801 | .feature_table_size = ARRAY_SIZE(features), | |
296f96fc RR |
1802 | .driver.name = KBUILD_MODNAME, |
1803 | .driver.owner = THIS_MODULE, | |
1804 | .id_table = id_table, | |
1805 | .probe = virtnet_probe, | |
8cc085d6 | 1806 | .remove = virtnet_remove, |
9f4d26d0 | 1807 | .config_changed = virtnet_config_changed, |
89107000 | 1808 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
1809 | .freeze = virtnet_freeze, |
1810 | .restore = virtnet_restore, | |
1811 | #endif | |
296f96fc RR |
1812 | }; |
1813 | ||
b2a17029 | 1814 | module_virtio_driver(virtio_net_driver); |
296f96fc RR |
1815 | |
1816 | MODULE_DEVICE_TABLE(virtio, id_table); | |
1817 | MODULE_DESCRIPTION("Virtio network driver"); | |
1818 | MODULE_LICENSE("GPL"); |