hv_netvsc: Eliminate status from struct hv_netvsc_packet
drivers/net/hyperv/netvsc.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

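/*
 * Allocate the per-device netvsc state together with the buffer used by
 * the channel callback to receive packets from the host.
 */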
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	hv_set_drvdata(device, net_device);
	return net_device;
}

static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

get_in_err:
	return net_device;
}

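/*
 * Tear down the receive and send buffers shared with the host: revoke each
 * buffer from the VSP, tear down its GPADL, then free the VM-side memory.
 */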
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}

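/*
 * Allocate the receive and send buffers, establish a GPADL for each with the
 * host, tell the VSP about them, then parse the completion replies to learn
 * the section layout.
 */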
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	unsigned long t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret;
	unsigned long t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
							ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

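/*
 * Connect to the VSP: negotiate the highest NVSP version both ends support,
 * send the NDIS version, then size and post the receive and send buffers.
 */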
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}

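/*
 * Transmit queues are stopped when available ring space falls below the low
 * watermark and woken again once it drains back above the high watermark.
 */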
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

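/*
 * Handle a completion from the host: channel-init replies wake up the
 * waiter; RNDIS packet completions release the send-buffer section, free
 * the skb and restart the transmit queue if it was flow-controlled.
 */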
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;
	struct sk_buff *skb;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;

		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *)skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}

}

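/*
 * Claim a free send-buffer section by scanning the allocation bitmap one
 * word at a time and atomically setting the first clear bit found.
 */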
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

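/*
 * Copy the page-buffer fragments of a packet into the chosen send-buffer
 * section, appending after pend_size so several packets can share one
 * section, and pad data packets up to the RNDIS alignment when batching.
 */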
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	bool is_data_pkt = (skb != NULL) ? true : false;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (is_data_pkt && xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

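/*
 * Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message and put it on the VMBus
 * channel for this queue, either inline or with the packet's page buffers
 * attached, and apply transmit flow control based on ring space.
 */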
static inline int netvsc_send_pkt(
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct net_device *ndev = net_device->ndev;
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		xmit_more = false;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->
					queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

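/*
 * Queue a packet for transmission.  Small packets are copied into a
 * send-buffer section and, when xmit_more allows it, held back so several
 * packets can be batched into one section before the host is signalled.
 */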
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];

	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msd_send = msdp->pkt;
			msdp->pkt = NULL;
			msdp->count = 0;
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt)
			dev_kfree_skb_any(skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		msd_send = msdp->pkt;
		msdp->pkt = NULL;
		msdp->count = 0;
		cur_send = packet;
	}

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

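/*
 * Acknowledge a received transfer-page packet back to the host, retrying a
 * few times if the ring buffer is momentarily full.
 */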
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev;

	ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}

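/*
 * Handle an inbound transfer-page packet: each range points into the
 * receive buffer and holds one RNDIS packet, which is handed to the RNDIS
 * filter before the whole transfer is acknowledged to the host.
 */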
static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev;
	void *data;

	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(device, netvsc_packet, &data,
					      channel);

	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}

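/*
 * Cache the VRSS send indirection table announced by the host, used when
 * selecting the transmit queue for outgoing packets.
 */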
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

static void netvsc_send_vf(struct netvsc_device *nvdev,
			   struct nvsp_message *nvmsg)
{
	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct netvsc_device *nvdev,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(nvdev, nvmsg);
		break;
	}
}

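/*
 * Channel interrupt callback: drain the VMBus ring, dispatching each packet
 * to the completion, receive or inband handler.  A larger buffer is
 * allocated on the fly if a packet does not fit in the per-channel one.
 */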
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;
	struct nvsp_message *nvmsg;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;
	buffer = get_per_channel_state(channel);

	do {
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				nvmsg = (struct nvsp_message *)((unsigned long)
					desc + (desc->offset8 << 3));
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(net_device,
							       channel,
							       device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
					netvsc_receive_inband(device,
							      net_device,
							      nvmsg);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
	return;
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
	((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Add netvsc_device context to netvsc_device */
	net_device->nd_ctx = netdev_priv(ndev);

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}