/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  - use IANA UDP port number (when defined)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
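/* Dynamic entries learned by vxlan_snoop() are rescanned every
 * FDB_AGE_INTERVAL by vxlan_cleanup() and expire once they have been
 * unused for the configured ageing time (FDB_AGE_DEFAULT seconds
 * unless overridden via IFLA_VXLAN_AGEING).
 */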
#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* VLAN + IP header + UDP + VXLAN */
#define VXLAN_HEADROOM (4 + 20 + 8 + 8)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
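/* The header is two 32-bit words as used below: a vx_flags word that
 * must equal VXLAN_FLAGS (the "valid VNI" bit), and a vx_vni word
 * carrying the 24-bit VNI in its upper three bytes.  The remaining
 * bits are reserved and are checked to be zero on receive.
 */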
/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
        struct socket	  *sock;	/* UDP encap socket */
        struct hlist_head vni_list[VNI_HASH_SIZE];
};
/* Forwarding table entry */
        struct hlist_node hlist;	/* linked list of entries */
        unsigned long	  updated;	/* jiffies */
        u16		  state;	/* see ndm_state */
        u8		  eth_addr[ETH_ALEN];
/* Per-cpu network traffic stats */
        struct u64_stats_sync syncp;
/* Pseudo network device */
        struct hlist_node hlist;
        struct net_device *dev;
        struct vxlan_stats __percpu *stats;
        __u32		  vni;		/* virtual network id */
        __be32		  gaddr;	/* multicast group */
        __be32		  saddr;	/* source address */
        unsigned int	  link;		/* link to multicast over */
        __u8		  tos;		/* TOS override */
        unsigned long	  age_interval;
        struct timer_list age_timer;
        spinlock_t	  hash_lock;
        unsigned int	  addrcnt;
        unsigned int	  addrmax;
        unsigned int	  addrexceeded;

        struct hlist_head fdb_head[FDB_HASH_SIZE];
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);

        return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
        struct vxlan_dev *vxlan;
        struct hlist_node *node;

        hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
                if (vxlan->vni == id)
                        return vxlan;
        }

        return NULL;
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
                          const struct vxlan_fdb *fdb,
                          u32 portid, u32 seq, int type, unsigned int flags)
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);

        ndm = nlmsg_data(nlh);
        memset(ndm, 0, sizeof(*ndm));
        ndm->ndm_family	= AF_BRIDGE;
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = NTF_SELF;
        ndm->ndm_type = NDA_DST;

        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;

        if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
                goto nla_put_failure;

        ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
        ci.ndm_confirmed = 0;
        ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);

        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

        nlmsg_cancel(skb, nlh);
static inline size_t vxlan_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ndmsg))
                + nla_total_size(ETH_ALEN)		/* NDA_LLADDR */
                + nla_total_size(sizeof(__be32))	/* NDA_DST */
                + nla_total_size(sizeof(struct nda_cacheinfo));
}
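/* This size must cover every attribute vxlan_fdb_info() emits; the
 * WARN_ON(err == -EMSGSIZE) in vxlan_fdb_notify() below relies on it.
 */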
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
                             const struct vxlan_fdb *fdb, int type)
        struct net *net = dev_net(vxlan->dev);

        skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);

        err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
        /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
        WARN_ON(err == -EMSGSIZE);

        rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);

        rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
        u64 value = get_unaligned((u64 *)addr);

        /* only want 6 bytes */

        return hash_64(value, FDB_HASH_BITS);
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
                                                const u8 *mac)
{
        return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
                                        const u8 *mac)
{
        struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
        struct vxlan_fdb *f;
        struct hlist_node *node;

        hlist_for_each_entry_rcu(f, node, head, hlist) {
                if (compare_ether_addr(mac, f->eth_addr) == 0)
                        return f;
        }

        return NULL;
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            const u8 *mac, __be32 ip,
                            __u16 state, __u16 flags)
        f = vxlan_find_mac(vxlan, mac);
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
                                   "lost race to create %pM\n", mac);
                if (f->state != state) {
                        f->updated = jiffies;

                if (!(flags & NLM_F_CREATE))

                if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)

                netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
                f = kmalloc(sizeof(*f), GFP_ATOMIC);
                f->updated = f->used = jiffies;
                memcpy(f->eth_addr, mac, ETH_ALEN);
                hlist_add_head_rcu(&f->hlist,
                                   vxlan_fdb_head(vxlan, mac));

        vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
        netdev_dbg(vxlan->dev,
                   "delete %pM\n", f->eth_addr);

        vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

        hlist_del_rcu(&f->hlist);
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                         struct net_device *dev,
                         const unsigned char *addr, u16 flags)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
                pr_info("RTM_NEWNEIGH with invalid state %#x\n",

        if (tb[NDA_DST] == NULL)

        if (nla_len(tb[NDA_DST]) != sizeof(__be32))
                return -EAFNOSUPPORT;

        ip = nla_get_be32(tb[NDA_DST]);

        spin_lock_bh(&vxlan->hash_lock);
        err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
        spin_unlock_bh(&vxlan->hash_lock);
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
                            const unsigned char *addr)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        spin_lock_bh(&vxlan->hash_lock);
        f = vxlan_find_mac(vxlan, addr);
                vxlan_fdb_destroy(vxlan, f);
        spin_unlock_bh(&vxlan->hash_lock);
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                          struct net_device *dev, int idx)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *n;

                hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
                        if (idx < cb->args[0])

                        err = vxlan_fdb_info(skb, vxlan, f,
                                             NETLINK_CB(cb->skb).portid,
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
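/* A hit that reports a new source IP updates the existing entry
 * (station migration); a miss creates a new dynamic entry, subject to
 * the per-device address limit enforced in vxlan_fdb_create().
 */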
static void vxlan_snoop(struct net_device *dev,
                        __be32 src_ip, const u8 *src_mac)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        f = vxlan_find_mac(vxlan, src_mac);
                if (likely(f->remote_ip == src_ip))

                        "%pM migrated from %pI4 to %pI4\n",
                        src_mac, &f->remote_ip, &src_ip);

                f->remote_ip = src_ip;
                f->updated = jiffies;

                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
                err = vxlan_fdb_create(vxlan, src_mac, src_ip,
                                       NLM_F_EXCL|NLM_F_CREATE);
                spin_unlock(&vxlan->hash_lock);
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
                             const struct vxlan_dev *this)
        const struct vxlan_dev *vxlan;
        struct hlist_node *node;

        for (h = 0; h < VNI_HASH_SIZE; ++h)
                hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
                        if (!netif_running(vxlan->dev))

                        if (vxlan->gaddr == this->gaddr)
/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr = vxlan->gaddr,

        /* Already a member of group */
        if (vxlan_group_used(vn, vxlan))

        /* Need to drop RTNL to call multicast join */

        err = ip_mc_join_group(sk, &mreq);
/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr = vxlan->gaddr,

        /* Only leave group when last vxlan is done. */
        if (vxlan_group_used(vn, vxlan))

        /* Need to drop RTNL to call multicast leave */

        err = ip_mc_leave_group(sk, &mreq);
/* Callback from net/ipv4/udp.c to receive packets */
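/* The handler below pops the outer UDP header, validates the VXLAN
 * flags and VNI, maps the VNI to a vxlan device, and then hands the
 * inner Ethernet frame to the stack as if received on that device.
 */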
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        struct vxlanhdr *vxh;
        struct vxlan_dev *vxlan;
        struct vxlan_stats *stats;

        /* pop off outer UDP header */
        __skb_pull(skb, sizeof(struct udphdr));

        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))

        /* Drop packets with reserved bits set */
        vxh = (struct vxlanhdr *) skb->data;
        if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
            (vxh->vx_vni & htonl(0xff))) {
                netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
                           ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

        __skb_pull(skb, sizeof(struct vxlanhdr));
        skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));

        /* Is this VNI defined? */
        vni = ntohl(vxh->vx_vni) >> 8;
        vxlan = vxlan_find_vni(sock_net(sk), vni);
                netdev_dbg(skb->dev, "unknown vni %d\n", vni);

        if (!pskb_may_pull(skb, ETH_HLEN)) {
                vxlan->dev->stats.rx_length_errors++;
                vxlan->dev->stats.rx_errors++;

        /* Re-examine inner Ethernet packet */
        skb->protocol = eth_type_trans(skb, vxlan->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        /* Ignore packet loops (and multicast echo) */
        if (compare_ether_addr(eth_hdr(skb)->h_source,
                               vxlan->dev->dev_addr) == 0)

        vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

        __skb_tunnel_rx(skb, vxlan->dev);
        skb_reset_network_header(skb);

        err = IP_ECN_decapsulate(oip, skb);
                net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                     &oip->saddr, oip->tos);

                ++vxlan->dev->stats.rx_frame_errors;
                ++vxlan->dev->stats.rx_errors;

        stats = this_cpu_ptr(vxlan->stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        /* Put UDP header back */
        __skb_push(skb, sizeof(struct udphdr));

        /* Consume bad packet */
/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
                                   const struct sk_buff *skb)
        if (skb->protocol == htons(ETH_P_IP))

        else if (skb->protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
                                 const struct iphdr *iph,
                                 const struct sk_buff *skb)
{
        u8 inner = vxlan_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}
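/* The outer TOS byte on transmit is built from the configured TOS and
 * the inner dsfield via INET_ECN_encapsulate(); on receive,
 * IP_ECN_decapsulate() folds the outer ECN marking back into the inner
 * header and invalid combinations are counted as rx_frame_errors.
 */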
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * The source port is based on the hash of the flow if available,
 * otherwise a random value is used.
 */
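/* The resulting frame on the wire is:
 *   outer IP | outer UDP | VXLAN header | original Ethernet frame
 * which is why VXLAN_HEADROOM bytes are reserved before pushing the
 * headers below.  Using the inner flow hash as the UDP source port
 * lets the underlay spread different inner flows across ECMP paths.
 */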
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        const struct ethhdr *eth;
        const struct iphdr *old_iph;
        struct vxlanhdr *vxh;
        unsigned int pkt_len = skb->len;

        /* Need space for new headers (invalidates iph ptr) */
        if (skb_cow_head(skb, VXLAN_HEADROOM))

        eth = (void *)skb->data;
        old_iph = ip_hdr(skb);

        if (!is_multicast_ether_addr(eth->h_dest) &&
            (f = vxlan_find_mac(vxlan, eth->h_dest)))
        else if (vxlan->gaddr) {

        if (!ttl && IN_MULTICAST(ntohl(dst)))

        tos = vxlan_get_dsfield(old_iph, skb);

        hash = skb_get_rxhash(skb);

        rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
                                 vxlan->saddr, vxlan->vni,
                                 RT_TOS(tos), vxlan->link);
                netdev_dbg(dev, "no route to %pI4\n", &dst);
                dev->stats.tx_carrier_errors++;

        if (rt->dst.dev == dev) {
                netdev_dbg(dev, "circular route to %pI4\n", &dst);
                dev->stats.collisions++;

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
        skb_dst_set(skb, &rt->dst);

        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_FLAGS);
        vxh->vx_vni = htonl(vxlan->vni << 8);

        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);

        uh->dest = htons(vxlan_port);
        uh->source = hash ? : random32();
        uh->len = htons(skb->len);

        __skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);

        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->protocol = IPPROTO_UDP;
        iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
        iph->daddr = fl4.daddr;
        iph->saddr = fl4.saddr;
        iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

        /* See __IPTUNNEL_XMIT */
        skb->ip_summed = CHECKSUM_NONE;
        ip_select_ident(iph, &rt->dst, NULL);

        err = ip_local_out(skb);
        if (likely(net_xmit_eval(err) == 0)) {
                struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_bytes += pkt_len;
                u64_stats_update_end(&stats->syncp);

                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;

        dev->stats.tx_dropped++;

        dev->stats.tx_errors++;
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
        struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
        unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;

        if (!netif_running(vxlan->dev))

        spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *p, *n;

                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
                        unsigned long timeout;

                        if (f->state == NUD_PERMANENT)

                        timeout = f->used + vxlan->age_interval * HZ;
                        if (time_before_eq(timeout, jiffies)) {
                                netdev_dbg(vxlan->dev,
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
                                vxlan_fdb_destroy(vxlan, f);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
        spin_unlock_bh(&vxlan->hash_lock);

        mod_timer(&vxlan->age_timer, next_timer);
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        vxlan->stats = alloc_percpu(struct vxlan_stats);
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        err = vxlan_join_group(dev);

        if (vxlan->age_interval)
                mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
        spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *p, *n;
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
                        vxlan_fdb_destroy(vxlan, f);
        spin_unlock_bh(&vxlan->hash_lock);
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        vxlan_leave_group(dev);

        del_timer_sync(&vxlan->age_timer);
/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
                                               struct rtnl_link_stats64 *stats)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_stats tmp, sum = { 0 };

        for_each_possible_cpu(cpu) {
                const struct vxlan_stats *stats
                        = per_cpu_ptr(vxlan->stats, cpu);

                do {
                        start = u64_stats_fetch_begin_bh(&stats->syncp);
                        memcpy(&tmp, stats, sizeof(tmp));
                } while (u64_stats_fetch_retry_bh(&stats->syncp, start));

                sum.tx_bytes   += tmp.tx_bytes;
                sum.tx_packets += tmp.tx_packets;
                sum.rx_bytes   += tmp.rx_bytes;
                sum.rx_packets += tmp.rx_packets;

        stats->tx_bytes   = sum.tx_bytes;
        stats->tx_packets = sum.tx_packets;
        stats->rx_bytes   = sum.rx_bytes;
        stats->rx_packets = sum.rx_packets;

        stats->multicast = dev->stats.multicast;
        stats->rx_length_errors = dev->stats.rx_length_errors;
        stats->rx_frame_errors = dev->stats.rx_frame_errors;
        stats->rx_errors = dev->stats.rx_errors;

        stats->tx_dropped = dev->stats.tx_dropped;
        stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
        stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
        stats->collisions = dev->stats.collisions;
        stats->tx_errors = dev->stats.tx_errors;
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_init		= vxlan_init,
        .ndo_open		= vxlan_open,
        .ndo_stop		= vxlan_stop,
        .ndo_start_xmit		= vxlan_xmit,
        .ndo_get_stats64	= vxlan_stats64,
        .ndo_set_rx_mode	= vxlan_set_multicast_list,
        .ndo_change_mtu		= eth_change_mtu,
        .ndo_validate_addr	= eth_validate_addr,
        .ndo_set_mac_address	= eth_mac_addr,
        .ndo_fdb_add		= vxlan_fdb_add,
        .ndo_fdb_del		= vxlan_fdb_delete,
        .ndo_fdb_dump		= vxlan_fdb_dump,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
        .name = "vxlan",
};
static void vxlan_free(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        free_percpu(vxlan->stats);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        eth_hw_addr_random(dev);

        dev->netdev_ops = &vxlan_netdev_ops;
        dev->destructor = vxlan_free;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);

        dev->tx_queue_len = 0;
        dev->features	|= NETIF_F_LLTX;
        dev->features	|= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

        spin_lock_init(&vxlan->hash_lock);

        init_timer_deferrable(&vxlan->age_timer);
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;

        for (h = 0; h < FDB_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_ID]		= { .type = NLA_U32 },
        [IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
        [IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
        [IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
        [IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
        [IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
        [IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
};
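/* Example of creating a device handled by vxlan_newlink() below; the
 * attributes map onto the policy above:
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0
 *
 * (shown for illustration; the exact syntax depends on the installed
 * iproute2 version)
 */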
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
                        pr_debug("invalid link address (not ethernet)\n");

                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
                        pr_debug("invalid all zero ethernet address\n");
                        return -EADDRNOTAVAIL;

        if (data[IFLA_VXLAN_ID]) {
                __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
                if (id >= VXLAN_VID_MASK)

        if (data[IFLA_VXLAN_GROUP]) {
                __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
                if (!IN_MULTICAST(ntohl(gaddr))) {
                        pr_debug("group address is not IPv4 multicast\n");
                        return -EADDRNOTAVAIL;
static int vxlan_newlink(struct net *net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
        struct vxlan_dev *vxlan = netdev_priv(dev);

        if (!data[IFLA_VXLAN_ID])

        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        if (vxlan_find_vni(net, vni)) {
                pr_info("duplicate VNI %u\n", vni);

        if (data[IFLA_VXLAN_GROUP])
                vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

        if (data[IFLA_VXLAN_LOCAL])
                vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

        if (data[IFLA_VXLAN_LINK]) {
                vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);

                if (!tb[IFLA_MTU]) {
                        struct net_device *lowerdev;
                        lowerdev = __dev_get_by_index(net, vxlan->link);
                        dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

        if (data[IFLA_VXLAN_TOS])
                vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

        if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
                vxlan->learn = true;

        if (data[IFLA_VXLAN_AGEING])
                vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
        else
                vxlan->age_interval = FDB_AGE_DEFAULT;

        if (data[IFLA_VXLAN_LIMIT])
                vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

        err = register_netdevice(dev);
                hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_dev *vxlan = netdev_priv(dev);

        hlist_del_rcu(&vxlan->hlist);

        unregister_netdevice_queue(dev, head);
static size_t vxlan_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
                nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
                nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
                nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
                nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
                nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
                nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
                0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        const struct vxlan_dev *vxlan = netdev_priv(dev);

        if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
                goto nla_put_failure;

        if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
                goto nla_put_failure;

        if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
                goto nla_put_failure;

        if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
                goto nla_put_failure;

        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
            nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
            nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
                goto nla_put_failure;
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .maxtype	= IFLA_VXLAN_MAX,
        .policy		= vxlan_policy,
        .priv_size	= sizeof(struct vxlan_dev),
        .setup		= vxlan_setup,
        .validate	= vxlan_validate,
        .newlink	= vxlan_newlink,
        .dellink	= vxlan_dellink,
        .get_size	= vxlan_get_size,
        .fill_info	= vxlan_fill_info,
};
static __net_init int vxlan_init_net(struct net *net)
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct sockaddr_in vxlan_addr = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };

        /* Create UDP socket for encapsulation receive. */
        rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
                pr_debug("UDP socket create failed\n");

        /* Put in proper namespace */
        sk_change_net(sk, net);

        vxlan_addr.sin_port = htons(vxlan_port);

        rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
                         sizeof(vxlan_addr));
                pr_debug("bind for UDP socket %pI4:%u (%d)\n",
                         &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
                sk_release_kernel(sk);

        /* Disable multicast loopback */
        inet_sk(sk)->mc_loop = 0;

        /* Mark socket as an encapsulation socket. */
        udp_sk(sk)->encap_type = 1;
        udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;

        for (h = 0; h < VNI_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vn->vni_list[h]);
static __net_exit void vxlan_exit_net(struct net *net)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);

        sk_release_kernel(vn->sock->sk);
}
static struct pernet_operations vxlan_net_ops = {
        .init = vxlan_init_net,
        .exit = vxlan_exit_net,
        .id   = &vxlan_net_id,
        .size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
        get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

        rc = register_pernet_device(&vxlan_net_ops);

        rc = rtnl_link_register(&vxlan_link_ops);

        unregister_pernet_device(&vxlan_net_ops);

module_init(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
        rtnl_link_unregister(&vxlan_link_ops);
        unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");