/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

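/* Count one received packet of the given length in the per-CPU stats of
 * the VRF device; used for traffic re-injected into the local Rx path.
 */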
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

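/* Fold the per-CPU counters into the aggregate device statistics. */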
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

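/* Transmit path: packets handed to the VRF device are routed again using
 * the VRF's FIB table and then re-injected, either back into the local Rx
 * path (loopback-style) or out through the resolved lower device.
 */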
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

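/* IPv4 analogue of the IPv6 path above: re-route the packet in the VRF's
 * table and either hand it back to the local Rx path or send it out the
 * resolved nexthop device.
 */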
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

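/* Dispatch a transmitted frame to the address-family specific handler. */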
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		/* use the length captured above; the skb may already have
		 * been consumed by the xmit path at this point
		 */
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

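/* The dst entries installed by the VRF device point back at it, so locally
 * generated traffic traverses these output hooks; they mirror the stack's
 * ip6_output/ip_output behaviour, including the netfilter POST_ROUTING hook.
 */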
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

static void vrf_rt6_release(struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	if (rt6)
		dst_release(&rt6->dst);

	if (rt6_local) {
		if (rt6_local->rt6i_idev)
			in6_dev_put(rt6_local->rt6i_idev);

		dst_release(&rt6_local->dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}

#else
static void vrf_rt6_release(struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh))
		ret = dst_neigh_output(dst, neigh, skb);

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);

	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static void vrf_rtable_release(struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	if (rth)
		dst_release(&rth->dst);

	if (rth_local)
		dst_release(&rth_local->dst);
}

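/* Allocate the IPv4 dst entries (one for output, one for local/loopback
 * delivery) that anchor routing through this VRF device.
 */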
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

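/* ndo_init: allocate the per-CPU stats and the default dst entries that
 * point back at the VRF device.
 */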
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;

	return 0;

out_rth:
	vrf_rtable_release(vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

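/* l3mdev operations: let the core IPv4/IPv6 stacks query the VRF's FIB
 * table, its cached dst entries and the preferred source address.
 */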
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static struct rtable *vrf_get_rtable(const struct net_device *dev,
				     const struct flowi4 *fl4)
{
	struct rtable *rth = NULL;

	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);

		rcu_read_lock();

		rth = rcu_dereference(vrf->rth);
		if (likely(rth))
			dst_hold(&rth->dst);

		rcu_read_unlock();
	}

	return rth;
}

/* called under rcu_read_lock */
static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
	struct fib_result res = { .tclassid = 0 };
	struct net *net = dev_net(dev);
	u32 orig_tos = fl4->flowi4_tos;
	u8 flags = fl4->flowi4_flags;
	u8 scope = fl4->flowi4_scope;
	u8 tos = RT_FL_TOS(fl4);
	int rc;

	if (unlikely(!fl4->daddr))
		return 0;

	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	/* make sure oif is set to VRF device for lookup */
	fl4->flowi4_oif = dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rc = fib_lookup(net, fl4, &res, 0);
	if (!rc) {
		if (res.type == RTN_LOCAL)
			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
		else
			fib_select_path(net, &res, fl4, -1);
	}

	fl4->flowi4_flags = flags;
	fl4->flowi4_tos = orig_tos;
	fl4->flowi4_scope = scope;

	return rc;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC keep the ingress interface */
	if (!ipv6_ndisc_frame(skb)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

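/* IPv6 flow lookups are answered with the VRF's cached rt6 dst unless the
 * flow already carries the L3MDEV flag (i.e. was built by this driver).
 */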
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
					 const struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;

	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt;

		rcu_read_lock();

		rt = rcu_dereference(vrf->rt6);
		if (likely(rt)) {
			dst = &rt->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();
	}

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_get_rtable	= vrf_get_rtable,
	.l3mdev_get_saddr	= vrf_get_saddr,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

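/* Link-independent setup for a new VRF net_device: wire up the ops, give it
 * a random MAC address and keep it local to its network namespace.
 */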
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

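/* Create a new VRF device bound to the route table given via IFLA_VRF_TABLE.
 * From user space this is typically driven with iproute2, e.g.:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev eth1 master vrf-blue
 */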
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	return register_netdevice(dev);
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	= vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);