/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define vrf_is_slave(dev) ((dev)->flags & IFF_SLAVE)

#define vrf_master_get_rcu(dev) \
	((struct net_device *)rcu_dereference(dev->rx_handler_data))

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
{
	return dst;
}

static int vrf_ip_local_out(struct sk_buff *skb)
{
	return ip_local_out(skb);
}

static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
	/* TO-DO: return max ethernet size? */

static void vrf_dst_destroy(struct dst_entry *dst)
{
	/* our dst lives forever - or until the device is closed */
}

static unsigned int vrf_default_advmss(const struct dst_entry *dst)

static struct dst_ops vrf_dst_ops = {
	.family		= AF_INET,
	.local_out	= vrf_ip_local_out,
	.check		= vrf_ip_check,
	.mtu		= vrf_v4_mtu,
	.destroy	= vrf_dst_destroy,
	.default_advmss	= vrf_default_advmss,
};

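/* These dst_ops back the per-VRF rtable allocated in vrf_rtable_create()
 * below; its dst.output hook (vrf_output) is what steers locally generated
 * IPv4 traffic bound to the VRF through this driver's output path.
 */
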
static bool is_ip_rx_frame(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		return true;
	}
	return false;
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

/* note: already called with rcu_read_lock */
static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_ip_rx_frame(skb)) {
		struct net_device *dev = vrf_master_get_rcu(skb->dev);
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->rx_pkts++;
		dstats->rx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);

		skb->dev = dev;

		return RX_HANDLER_ANOTHER;
	}
	return RX_HANDLER_PASS;
}

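/* Returning RX_HANDLER_ANOTHER asks the core to run receive processing
 * again with the skb now attributed to the VRF master, so IP input happens
 * in the VRF context; non-IP traffic stays on the enslaved device via
 * RX_HANDLER_PASS.
 */
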
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

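/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair is a
 * seqcount-style read loop: if a writer updated the per-cpu counters while
 * they were being read, the snapshot is retried, keeping the 64-bit values
 * consistent on 32-bit hosts without taking a lock.
 */
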
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}

static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
			    struct net_device *vrf_dev)
{
	struct rtable *rt;

	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);

	/* TO-DO: what about broadcast ? */
	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {

	skb_dst_set(skb, &rt->dst);

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr)
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);

	ret = ip_local_out(skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += skb->len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);

	return ret;
}

static int vrf_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
			    NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

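/* vrf_output is installed as dst.output on the VRF rtable, so packets
 * routed via the VRF traverse the IPv4 POST_ROUTING netfilter hook (unless
 * already rerouted) and then vrf_finish_output, which resolves the
 * neighbour and hands the skb to the underlying output device.
 */
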
static void vrf_rtable_destroy(struct net_vrf *vrf)
	struct dst_entry *dst = (struct dst_entry *)vrf->rth;

static struct rtable *vrf_rtable_create(struct net_device *dev)
{
	struct rtable *rth;

	rth = dst_alloc(&vrf_dst_ops, dev, 2,
			DST_OBSOLETE_NONE,
			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
	if (rth) {
		rth->dst.output	= vrf_output;
		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
		rth->rt_flags	= 0;
		rth->rt_type	= RTN_UNICAST;
		rth->rt_is_input = 0;
		rth->rt_iif	= 0;
		rth->rt_pmtu	= 0;
		rth->rt_gateway	= 0;
		rth->rt_uses_gateway = 0;
		INIT_LIST_HEAD(&rth->rt_uncached);
		rth->rt_uncached_list = NULL;
	}

	return rth;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
					  struct net_device *dev)
{
	struct list_head *head = &queue->all_slaves;
	struct slave *slave;

	list_for_each_entry(slave, head, list) {
		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

/* inverse of __vrf_insert_slave */
static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
{
	list_del(&slave->list);
}

static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
{
	list_add(&slave->list, &queue->all_slaves);
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	int ret = -ENOMEM;

	if (!slave || !vrf_ptr)
		goto out_fail;

	slave->dev = port_dev;
	vrf_ptr->ifindex = dev->ifindex;
	vrf_ptr->tb_id = vrf->tb_id;

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   dev->name);
		goto out_fail;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev);
	if (ret < 0)
		goto out_unregister;

	port_dev->flags |= IFF_SLAVE;
	__vrf_insert_slave(queue, slave);
	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_fail:
	kfree(vrf_ptr);
	kfree(slave);
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct slave *slave;

	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);

	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->flags &= ~IFF_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	/* after netdev_rx_handler_unregister for synchronize_rcu */
	kfree(vrf_ptr);

	cycle_netdev(port_dev);

	slave = __vrf_find_slave_dev(queue, port_dev);
	if (slave)
		__vrf_remove_slave(queue, slave);

	kfree(slave);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct list_head *head = &queue->all_slaves;
	struct slave *slave, *next;

	vrf_rtable_destroy(vrf);

	list_for_each_entry_safe(slave, next, head, list)
		vrf_del_slave(dev, slave->dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	INIT_LIST_HEAD(&vrf->queue.all_slaves);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	vrf->rth = vrf_rtable_create(dev);
	if (!vrf->rth)
		goto out_stats;

	dev->flags = IFF_MASTER | IFF_NOARP;

	return 0;

out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);

	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
	kfree_rcu(vrf_ptr, rcu);
	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_vrf_dev *vrf_ptr;
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_VRF_MASTER;

	err = -ENOMEM;
	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
	if (!vrf_ptr)
		goto out_fail;

	vrf_ptr->ifindex = dev->ifindex;
	vrf_ptr->tb_id = vrf->tb_id;

	err = register_netdevice(dev);
	if (err < 0)
		goto out_fail;

	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);

	return 0;

out_fail:
	kfree(vrf_ptr);
	free_netdev(dev);
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
		struct net_device *vrf_dev;

		if (!vrf_ptr || netif_is_vrf(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	vrf_dst_ops.kmem_cachep =
		kmem_cache_create("vrf_ip_dst_cache",
				  sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!vrf_dst_ops.kmem_cachep)
		return -ENOMEM;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
	return rc;
}

static void __exit vrf_cleanup_module(void)
{
	rtnl_link_unregister(&vrf_link_ops);
	unregister_netdevice_notifier(&vrf_notifier_block);
	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
}

module_init(vrf_init_module);
module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);
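
/* Typical usage, as a sketch (assumes an iproute2 build with VRF support;
 * the device and table names below are illustrative):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * "type vrf table 10" supplies the IFLA_VRF_TABLE attribute consumed by
 * vrf_newlink(), and "master vrf-blue" enslaves the port through
 * vrf_add_slave().
 */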