module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
-#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
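+/* A mergeable rx buffer holds a maximum-size Ethernet frame (including
+ * a VLAN tag) plus the mergeable rx header, rounded to a cache line. */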
+#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
+ sizeof(struct virtio_net_hdr_mrg_rxbuf), \
+ L1_CACHE_BYTES))
#define GOOD_COPY_LEN 128
#define VIRTNET_DRIVER_VERSION "1.0.0"
return skb;
}
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
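+/* Reassemble a packet the host scattered across num_buffers mergeable
+ * rx buffers: build the head skb from the first buffer, then attach
+ * each remaining buffer as a page fragment. Returns NULL on error. */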
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+ struct receive_queue *rq,
+ void *buf,
+ unsigned int len)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+ struct skb_vnet_hdr *hdr = buf;
+ int num_buf = hdr->mhdr.num_buffers;
+ struct page *page = virt_to_head_page(buf);
+ int offset = buf - page_address(page);
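+ /* page_to_skb() copies the packet headers into the skb's linear
+ * area; the rest of the first buffer stays attached as a fragment. */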
+ struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+ MERGE_BUFFER_LEN);
struct sk_buff *curr_skb = head_skb;
- char *buf;
- struct page *page;
- int num_buf, len, offset;
- num_buf = hdr->mhdr.num_buffers;
+ if (unlikely(!curr_skb))
+ goto err_skb;
+
while (--num_buf) {
- int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ int num_skb_frags;
+
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
- pr_debug("%s: rx error: %d buffers missing\n",
- head_skb->dev->name, hdr->mhdr.num_buffers);
- head_skb->dev->stats.rx_length_errors++;
- return -EINVAL;
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ dev->name, num_buf, hdr->mhdr.num_buffers);
+ dev->stats.rx_length_errors++;
+ goto err_buf;
}
- if (unlikely(len > MAX_PACKET_LEN)) {
+ if (unlikely(len > MERGE_BUFFER_LEN)) {
pr_debug("%s: rx error: merge buffer too long\n",
- head_skb->dev->name);
- len = MAX_PACKET_LEN;
+ dev->name);
+ len = MERGE_BUFFER_LEN;
}
+
+ page = virt_to_head_page(buf);
+ --rq->num;
+
+ num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
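+ /* The frag array is full: chain a fresh skb off head_skb's
+ * frag_list and keep filling that one instead. */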
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- head_skb->dev->stats.rx_dropped++;
- return -ENOMEM;
- }
+
+ if (unlikely(!nskb))
+ goto err_skb;
if (curr_skb == head_skb)
skb_shinfo(curr_skb)->frag_list = nskb;
else
if (curr_skb != head_skb) {
head_skb->data_len += len;
head_skb->len += len;
- head_skb->truesize += MAX_PACKET_LEN;
+ head_skb->truesize += MERGE_BUFFER_LEN;
}
- page = virt_to_head_page(buf);
- offset = buf - (char *)page_address(page);
+ offset = buf - page_address(page);
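+ /* A chunk adjacent to the previous fragment in the same page can
+ * extend that fragment rather than take a new frag slot. */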
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
- len, MAX_PACKET_LEN);
+ len, MERGE_BUFFER_LEN);
} else {
skb_add_rx_frag(curr_skb, num_skb_frags, page,
- offset, len,
- MAX_PACKET_LEN);
+ offset, len, MERGE_BUFFER_LEN);
}
+ }
+
+ return head_skb;
+
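+/* Error unwind: release the buffer being parsed, then drain and free
+ * this packet's remaining buffers so ring accounting stays correct. */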
+err_skb:
+ put_page(page);
+ while (--num_buf) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, num_buf);
+ dev->stats.rx_length_errors++;
+ break;
+ }
+ page = virt_to_head_page(buf);
+ put_page(page);
--rq->num;
}
- return 0;
+err_buf:
+ dev->stats.rx_dropped++;
+ dev_kfree_skb(head_skb);
+ return NULL;
}
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
len -= sizeof(struct virtio_net_hdr);
skb_trim(skb, len);
} else if (vi->mergeable_rx_bufs) {
- struct page *page = virt_to_head_page(buf);
- skb = page_to_skb(rq, page,
- (char *)buf - (char *)page_address(page),
- len, MAX_PACKET_LEN);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- put_page(page);
- return;
- }
- if (receive_mergeable(rq, skb)) {
- dev_kfree_skb(skb);
+ skb = receive_mergeable(dev, rq, buf, len);
+ if (unlikely(!skb))
return;
- }
} else {
page = buf;
skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
struct skb_vnet_hdr *hdr;
int err;
- skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, GOOD_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
int err;
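+ /* Carve a MERGE_BUFFER_LEN chunk out of a page frag: the sleeping
+ * path refills a per-device frag cache, the atomic path falls back
+ * to netdev_alloc_frag(). Either way the buffer pins its page. */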
if (gfp & __GFP_WAIT) {
- if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+ if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
gfp)) {
buf = (char *)page_address(vi->alloc_frag.page) +
vi->alloc_frag.offset;
get_page(vi->alloc_frag.page);
- vi->alloc_frag.offset += MAX_PACKET_LEN;
+ vi->alloc_frag.offset += MERGE_BUFFER_LEN;
}
} else {
- buf = netdev_alloc_frag(MAX_PACKET_LEN);
+ buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
}
if (!buf)
return -ENOMEM;
- sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+ sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
} while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
- virtqueue_kick(rq->vq);
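+ /* virtqueue_kick() can now fail (e.g. the device is broken);
+ * report that rather than pretend the refill succeeded. */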
+ if (unlikely(!virtqueue_kick(rq->vq)))
+ return false;
return !oom;
}
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err)) {
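+ /* Treat a failed kick like a failed add: count it and drop the skb. */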
+ if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
kfree_skb(skb);
return NETDEV_TX_OK;
}
- virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
< 0);
- virtqueue_kick(vi->cvq);
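+ /* If the kick fails, don't spin waiting for a response that will
+ * never arrive. */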
+ if (unlikely(!virtqueue_kick(vi->cvq)))
+ return status == VIRTIO_NET_OK;
/* Spin for a response; the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!virtqueue_get_buf(vi->cvq, &tmp))
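+ /* Also give up if the virtqueue breaks while we wait, instead of
+ * spinning on a dead device. */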
+ while (!virtqueue_get_buf(vi->cvq, &tmp) &&
+ !virtqueue_is_broken(vi->cvq))
cpu_relax();
return status == VIRTIO_NET_OK;
return -EINVAL;
}
} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
- vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- addr->sa_data, dev->addr_len);
+ unsigned int i;
+
+ /* Naturally, this has an atomicity problem. */
+ for (i = 0; i < dev->addr_len; i++)
+ virtio_cwrite8(vdev,
+ offsetof(struct virtio_net_config, mac) +
+ i, addr->sa_data[i]);
}
eth_commit_mac_addr_change(dev, p);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
sg, NULL))
- dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+ dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
kfree(buf);
}
if (!vi->config_enable)
goto done;
- if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
- offsetof(struct virtio_net_config, status),
- &v) < 0)
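+ /* virtio_cread_feature() reads the config field only if the feature
+ * bit was negotiated, and fails with -ENOENT otherwise. */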
+ if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
+ struct virtio_net_config, status, &v) < 0)
goto done;
if (v & VIRTIO_NET_S_ANNOUNCE) {
u16 max_queue_pairs;
/* Find if host supports multiqueue virtio_net device */
- err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
- offsetof(struct virtio_net_config,
- max_virtqueue_pairs), &max_queue_pairs);
+ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
+ struct virtio_net_config,
+ max_virtqueue_pairs, &max_queue_pairs);
/* We need at least 2 queues */
if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
dev->vlan_features = dev->features;
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
- offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len) < 0)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ virtio_cread_bytes(vdev,
+ offsetof(struct virtio_net_config, mac),
+ dev->dev_addr, dev->addr_len);
+ else
eth_hw_addr_random(dev);
/* Set up our device-specific information */
if (vi->stats == NULL)
goto free;
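+ /* Initialize each per-cpu u64_stats_sync so lockdep sees the
+ * seqcounts before the first stats update. */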
+ for_each_possible_cpu(i) {
+ struct virtnet_stats *virtnet_stats;
+
+ virtnet_stats = per_cpu_ptr(vi->stats, i);
+ u64_stats_init(&virtnet_stats->tx_syncp);
+ u64_stats_init(&virtnet_stats->rx_syncp);
+ }
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
if (err)
goto free_stats;
- netif_set_real_num_tx_queues(dev, 1);
- netif_set_real_num_rx_queues(dev, 1);
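+ /* Advertise every enabled queue pair to the stack, not just one. */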
+ netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
+ netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
err = register_netdev(dev);
if (err) {
free_netdev(vi->dev);
}
-#ifdef CONFIG_PM
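+/* freeze/restore are only used for system sleep, so CONFIG_PM_SLEEP is
+ * the accurate guard rather than the broader CONFIG_PM. */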
+#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
.probe = virtnet_probe,
.remove = virtnet_remove,
.config_changed = virtnet_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtnet_freeze,
.restore = virtnet_restore,
#endif