/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */
static bool add_fib_rules = true;
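
/* The l3mdev FIB rules (one per address family) only need to exist once
 * per namespace; they are installed when the first VRF device is created
 * and add_fib_rules is then cleared so later devices do not add duplicates.
 */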
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
#if IS_ENABLED(CONFIG_IPV6)
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
#endif
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
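/* Outbound IPv6 path: the route lookup is done against the VRF's table
 * (the oif/FLOWI_FLAG_L3MDEV_SRC rule steers it there) and the packet is
 * handed to ip6_local_out(); locally destined traffic is short-circuited
 * to the Rx path via the cached local dst.
 */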
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
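
/* Outbound IPv4 path: same idea as the IPv6 path above - look up the
 * route in the VRF's table, short-circuit locally destined traffic to
 * the Rx path, otherwise send via ip_local_out().
 */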
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
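
/* ndo_start_xmit handler: dispatch on the L3 protocol and account the
 * result in the per-cpu tx counters.
 */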
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev)
			in6_dev_put(rt6_local->rt6i_idev);

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output = vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}
#else
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output = vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
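
/* l3mdev operations: report the FIB table this VRF uses and hand back
 * the cached dst entries / source addresses for flows bound to the VRF
 * device.
 */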
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rcu_read_lock();

		rth = rcu_dereference(vrf->rth);
		if (likely(rth))
			dst_hold(&rth->dst);

		rcu_read_unlock();
	}

	return rth;
}
/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc = -EINVAL;

	if (unlikely(!fl4->daddr))
		goto out;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

out:
	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}
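
/* Ingress path: packets received on an enslaved device are switched to
 * the VRF device and run through the NF_INET_PRE_ROUTING hook before
 * being handed back to the stack.
 */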
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * here for now.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.flowi6_iif	= ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 struct flowi6 *fl6)
{
	bool need_strict = rt6_need_strict(&fl6->daddr);
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* send to link-local or multicast address */
	if (need_strict) {
		int flags = RT6_LOOKUP_F_IFACE;

		/* VRF device does not have a link-local address and
		 * sending packets to link-local or mcast addresses over
		 * a VRF device does not make sense
		 */
		if (fl6->flowi6_oif == dev->ifindex) {
			struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst;

			dst_hold(dst);
			return dst;
		}

		if (!ipv6_addr_any(&fl6->saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
		if (rt)
			dst = &rt->dst;

	} else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {

		rcu_read_lock();

		rt = rcu_dereference(vrf->rt6);
		if (rt) {
			dst = &rt->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();
	}

	/* make sure oif is set to VRF device for lookup */
	if (!need_strict)
		fl6->flowi6_oif = dev->ifindex;

	return dst;
}
/* called under rcu_read_lock */
static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
			  struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	struct rt6_info *rt;
	int err;

	if (rt6_need_strict(&fl6->daddr)) {
		rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
					  RT6_LOOKUP_F_IFACE);
		if (unlikely(!rt))
			return 0;

		dst = &rt->dst;
	} else {
		__u8 flags = fl6->flowi6_flags;

		fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
		fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;

		dst = ip6_route_output(net, sk, fl6);
		rt = (struct rt6_info *)dst;

		fl6->flowi6_flags = flags;
	}

	err = dst->error;
	if (!err) {
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
	}

	dst_release(dst);

	return err;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
	.l3mdev_get_saddr6	= vrf_get_saddr6,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
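
/* Install/remove the l3mdev FIB rule for one address family by building
 * a netlink message inside the kernel and feeding it directly to
 * fib_nl_newrule()/fib_nl_delrule().
 */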
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

	return 0;

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		add_fib_rules = false;
	}

out:
	return err;
}
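
/* Typical usage from user space (iproute2), shown here for reference;
 * the device and interface names are only examples:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * The first command exercises vrf_newlink() above with IFLA_VRF_TABLE
 * set to 10; the last one ends up in vrf_add_slave().
 */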
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}
static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}
module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);