veth: fix 64bit stats on 32bit arches
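This is the veth driver after it was switched to per-cpu 64-bit counters guarded by u64_stats_sync, so that statistics reads stay consistent on 32-bit architectures, where a 64-bit load is not atomic. A minimal sketch of the writer/reader pattern the driver uses below; the demo_* names are illustrative only, not part of the driver:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Sketch only: demo_stats/demo_inc/demo_read are made-up names. */
struct demo_stats {
	u64			packets;
	struct u64_stats_sync	syncp;
};

static void demo_inc(struct demo_stats *s)
{
	u64_stats_update_begin(&s->syncp);	/* open the write section */
	s->packets++;
	u64_stats_update_end(&s->syncp);	/* close it */
}

static u64 demo_read(struct demo_stats *s)
{
	unsigned int start;
	u64 packets;

	do {	/* re-read if a writer was inside its section */
		start = u64_stats_fetch_begin_bh(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));

	return packets;
}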

/*
 * drivers/net/veth.c
 *
 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define MIN_MTU 68		/* Min L3 MTU */
#define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */

struct veth_net_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;
	u64			rx_dropped;
	u64			tx_dropped;
	struct u64_stats_sync	syncp;
};

struct veth_priv {
	struct net_device *peer;
	struct veth_net_stats __percpu *stats;
};
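
/*
 * Each cpu updates only its own veth_net_stats instance, so writers need
 * no lock.  The u64_stats_sync member lets a reader on a 32-bit machine
 * detect a torn 64-bit update and retry; on 64-bit builds it compiles
 * away to nothing.
 */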

/*
 * ethtool interface
 */

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported		= 0;
	cmd->advertising	= 0;
	ethtool_cmd_speed_set(cmd, SPEED_10000);
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys);
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	data[0] = priv->peer->ifindex;
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_settings		= veth_get_settings,
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
};
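
/*
 * From userspace, "ethtool -S <dev>" reports the keys above, i.e. the
 * ifindex of the peer device.
 */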

/*
 * xmit
 */

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	stats = this_cpu_ptr(priv->stats);
	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	   will cause bad checksum on forwarded packets */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	length = skb->len;
	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
		goto rx_drop;

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += length;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	u64_stats_update_begin(&rcv_stats->syncp);
	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;
	u64_stats_update_end(&rcv_stats->syncp);

	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_dropped++;
	u64_stats_update_end(&stats->syncp);
	return NETDEV_TX_OK;

rx_drop:
	u64_stats_update_begin(&rcv_stats->syncp);
	rcv_stats->rx_dropped++;
	u64_stats_update_end(&rcv_stats->syncp);
	return NETDEV_TX_OK;
}
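
/*
 * Note on the stats updates above: each cpu bumps only its own per-cpu
 * counters (this_cpu_ptr), so the u64_stats_update_begin/end pair is not
 * a lock; it only marks the write section so a concurrent 32-bit reader
 * can notice a torn 64-bit update and retry.
 */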

/*
 * general routines
 */

static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
		u64 rx_packets, rx_bytes, rx_dropped;
		u64 tx_packets, tx_bytes, tx_dropped;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
			rx_dropped = stats->rx_dropped;
			tx_dropped = stats->tx_dropped;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->rx_dropped += rx_dropped;
		tot->tx_dropped += tx_dropped;
	}

	return tot;
}
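
/*
 * The fetch_begin_bh/fetch_retry_bh loop above snapshots one cpu's
 * counters and re-reads them if a writer was inside its update section;
 * this is what keeps the summed 64-bit totals consistent on 32-bit
 * arches.
 */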

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	if (priv->peer == NULL)
		return -ENOTCONN;

	if (priv->peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(priv->peer);
	}
	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	netif_carrier_off(dev);
	netif_carrier_off(priv->peer);

	return 0;
}

static int is_valid_veth_mtu(int new_mtu)
{
	return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
}

static int veth_change_mtu(struct net_device *dev, int new_mtu)
{
	if (!is_valid_veth_mtu(new_mtu))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static int veth_dev_init(struct net_device *dev)
{
	struct veth_net_stats __percpu *stats;
	struct veth_priv *priv;

	stats = alloc_percpu(struct veth_net_stats);
	if (stats == NULL)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->stats = stats;
	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	free_percpu(priv->stats);
	free_netdev(dev);
}
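
/*
 * The per-cpu stats block is allocated from ->ndo_init (veth_dev_init)
 * during register_netdevice() and released by the device destructor
 * (veth_dev_free) together with the netdevice itself.
 */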

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_change_mtu      = veth_change_mtu,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
};

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->destructor = veth_dev_free;

	dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = nla_parse(peer_tb, IFLA_MAX,
				nla_data(nla_peer) + sizeof(struct ifinfomsg),
				nla_len(nla_peer) - sizeof(struct ifinfomsg),
				ifla_policy);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (tbp[IFLA_IFNAME])
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (tbp[IFLA_ADDRESS] == NULL)
		random_ether_addr(peer->dev_addr);

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've just registered the peer device, the
	 * dev's name may clash with it and should be re-allocated below
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		random_ether_addr(dev->dev_addr);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto err_alloc_name;
	}

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	priv->peer = peer;

	priv = netdev_priv(peer);
	priv->peer = dev;
	return 0;

err_register_dev:
	/* nothing to do */
err_alloc_name:
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}
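
/*
 * From userspace a pair is typically created through this netlink
 * interface via iproute2, e.g.:
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * (the interface names here are just an example)
 */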

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = priv->peer;

	unregister_netdevice_queue(dev, head);
	unregister_netdevice_queue(peer, head);
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);