2 * Linux NET3: GRE over IP protocol decoder.
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
50 #include <net/dst_metadata.h>
52 #if IS_ENABLED(CONFIG_IPV6)
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
62 1. The most important issue is detecting local dead loops.
63 They would cause complete host lockup in transmit, which
64 would be "resolved" by stack overflow or, if queueing is enabled,
65 with infinite looping in net_bh.
67 We cannot track such dead loops during route installation,
68 it is an infeasible task. The most general solution would be
69 to keep skb->encapsulation counter (sort of local ttl),
70 and silently drop packet when it expires. It is a good
71 solution, but it supposes maintaining new variable in ALL
72 skb, even if no tunneling is used.
74 Current solution: xmit_recursion breaks dead loops. This is a percpu
75 counter, since when we enter the first ndo_xmit(), cpu migration is
76 forbidden. We force an exit if this counter reaches RECURSION_LIMIT
78 2. Networking dead loops would not kill routers, but would really
79 kill network. IP hop limit plays role of "t->recursion" in this case,
80 if we copy it from packet being encapsulated to upper header.
81 It is very good solution, but it introduces two problems:
83 - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
84 do not work over tunnels.
85 - traceroute does not work. I planned to relay ICMP from tunnel,
86 so that this problem would be solved and traceroute output
87 would be even more informative. This idea appeared to be wrong:
88 only Linux complies to rfc1812 now (yes, guys, Linux is the only
89 true router now :-)), all routers (at least, in neighbourhood of mine)
90 return only 8 bytes of payload. It is the end.
92 Hence, if we want that OSPF worked or traceroute said something reasonable,
93 we should search for another solution.
95 One of them is to parse packet trying to detect inner encapsulation
96 made by our node. It is difficult or even impossible, especially,
97 taking into account fragmentation. To be short, ttl is not a solution at all.
99 Current solution: The solution was UNEXPECTEDLY SIMPLE.
100 We force DF flag on tunnels with preconfigured hop limit,
101 that is ALL. :-) Well, it does not remove the problem completely,
102 but exponential growth of network traffic is changed to linear
103 (branches, that exceed pmtu are pruned) and tunnel mtu
104 rapidly degrades to value <68, where looping stops.
105 Yes, it is not good if there exists a router in the loop,
106 which does not force DF, even when encapsulating packets have DF set.
107 But it is not our problem! Nobody could accuse us, we made
108 all that we could make. Even if it is your gated who injected
109 fatal route to network, even if it were you who configured
110 fatal static route: you are innocent. :-)
/* Module parameter: when true, log received packets whose ECN field was
 * corrupted in transit (passed through to ip_tunnel_rcv() below).
 */
115 static bool log_ecn_error
= true;
116 module_param(log_ecn_error
, bool, 0644);
117 MODULE_PARM_DESC(log_ecn_error
, "Log packets received with corrupted ECN");
/* Forward declarations, plus the two per-netns ids: ipgre_net_id for
 * plain GRE (layer-3) devices, gre_tap_net_id for gretap
 * (Ethernet-over-GRE) devices. Both tables are consulted on receive
 * and in the ICMP error handler.
 */
119 static struct rtnl_link_ops ipgre_link_ops __read_mostly
;
120 static int ipgre_tunnel_init(struct net_device
*dev
);
122 static int ipgre_net_id __read_mostly
;
123 static int gre_tap_net_id __read_mostly
;
/* Handle an ICMP error received for a GRE packet we previously sent:
 * classify the ICMP type/code, find the tunnel the offending packet
 * belonged to, and record the error on it (rate-limited).
 * NOTE(review): this extract omits several original lines (the switch
 * statement itself, early returns, and the err_count update); comments
 * below describe only what the visible fragments establish.
 */
125 static void ipgre_err(struct sk_buff
*skb
, u32 info
,
126 const struct tnl_ptk_info
*tpi
)
129 /* All the routers (except for Linux) return only
130 8 bytes of packet payload. It means, that precise relaying of
131 ICMP in the real Internet is absolutely infeasible.
133 Moreover, Cisco "wise men" put GRE key to the third word
134 in GRE header. It makes impossible maintaining even soft
135 state for keyed GRE tunnels with enabled checksum. Tell
138 Well, I wonder, rfc1812 was written by Cisco employee,
139 what the hell these idiots break standards established
142 struct net
*net
= dev_net(skb
->dev
);
143 struct ip_tunnel_net
*itn
;
144 const struct iphdr
*iph
;
/* ICMP type/code are read straight from the ICMP header of skb. */
145 const int type
= icmp_hdr(skb
)->type
;
146 const int code
= icmp_hdr(skb
)->code
;
151 case ICMP_PARAMETERPROB
:
154 case ICMP_DEST_UNREACH
:
157 case ICMP_PORT_UNREACH
:
158 /* Impossible event. */
161 /* All others are translated to HOST_UNREACH.
162 rfc2003 contains "deep thoughts" about NET_UNREACH,
163 I believe they are just ether pollution. --ANK
169 case ICMP_TIME_EXCEEDED
:
170 if (code
!= ICMP_EXC_TTL
)
/* Pick the per-netns table by inner protocol: ETH_P_TEB means the
 * offending packet came from a gretap device, otherwise plain GRE.
 */
178 if (tpi
->proto
== htons(ETH_P_TEB
))
179 itn
= net_generic(net
, gre_tap_net_id
);
181 itn
= net_generic(net
, ipgre_net_id
);
/* The quoted inner IP header sits immediately after the ICMP header. */
183 iph
= (const struct iphdr
*)(icmp_hdr(skb
) + 1);
/* Lookup with daddr/saddr in transmit orientation: the quoted packet
 * is one WE sent, so its daddr is the tunnel's remote endpoint.
 */
184 t
= ip_tunnel_lookup(itn
, skb
->dev
->ifindex
, tpi
->flags
,
185 iph
->daddr
, iph
->saddr
, tpi
->key
);
/* No error bookkeeping for unconfigured-remote or multicast tunnels. */
190 if (t
->parms
.iph
.daddr
== 0 ||
191 ipv4_is_multicast(t
->parms
.iph
.daddr
))
/* ttl==0 means "inherit": TIME_EXCEEDED is then expected, not an error. */
194 if (t
->parms
.iph
.ttl
== 0 && type
== ICMP_TIME_EXCEEDED
)
/* Rate-limit: ignore errors arriving within IPTUNNEL_ERR_TIMEO of the
 * last one recorded on this tunnel.
 */
197 if (time_before(jiffies
, t
->err_time
+ IPTUNNEL_ERR_TIMEO
))
201 t
->err_time
= jiffies
;
/* Top-level ICMP error entry point for the GRE protocol: re-parse the
 * GRE header of the offending packet, handle PMTU and redirect updates
 * in-line, then hand everything else to ipgre_err().
 * NOTE(review): some original lines (returns, ipv4_redirect()'s final
 * argument) are missing from this extract.
 */
204 static void gre_err(struct sk_buff
*skb
, u32 info
)
206 /* All the routers (except for Linux) return only
207 * 8 bytes of packet payload. It means, that precise relaying of
208 * ICMP in the real Internet is absolutely infeasible.
210 * Moreover, Cisco "wise men" put GRE key to the third word
211 * in GRE header. It makes impossible maintaining even soft
213 * GRE tunnels with enabled checksum. Tell them "thank you".
215 * Well, I wonder, rfc1812 was written by Cisco employee,
216 * what the hell these idiots break standards established
220 const int type
= icmp_hdr(skb
)->type
;
221 const int code
= icmp_hdr(skb
)->code
;
222 struct tnl_ptk_info tpi
;
223 bool csum_err
= false;
/* Re-parse the GRE header out of the ICMP payload; a parse failure
 * that is NOT a checksum error means a malformed quote — bail out.
 */
225 if (gre_parse_header(skb
, &tpi
, &csum_err
, htons(ETH_P_IP
)) < 0) {
226 if (!csum_err
) /* ignore csum errors. */
/* Fragmentation-needed: update the cached path MTU for this flow. */
230 if (type
== ICMP_DEST_UNREACH
&& code
== ICMP_FRAG_NEEDED
) {
231 ipv4_update_pmtu(skb
, dev_net(skb
->dev
), info
,
232 skb
->dev
->ifindex
, 0, IPPROTO_GRE
, 0);
/* Redirect: update the route cache, no tunnel state to touch. */
235 if (type
== ICMP_REDIRECT
) {
236 ipv4_redirect(skb
, dev_net(skb
->dev
), skb
->dev
->ifindex
, 0,
/* Everything else: per-tunnel error accounting. */
241 ipgre_err(skb
, info
, &tpi
);
244 static __be64
key_to_tunnel_id(__be32 key
)
247 return (__force __be64
)((__force u32
)key
);
249 return (__force __be64
)((__force u64
)key
<< 32);
253 /* Returns the least-significant 32 bits of a __be64. */
254 static __be32
tunnel_id_to_key(__be64 x
)
257 return (__force __be32
)x
;
259 return (__force __be32
)((__force u64
)x
>> 32);
/* Core GRE receive path: look up the tunnel for an already-parsed GRE
 * packet, strip the outer headers, build metadata dst for collect_md
 * tunnels, and deliver via ip_tunnel_rcv().
 * NOTE(review): the assignment of iph (presumably iph = ip_hdr(skb)),
 * the tunnel-miss return, and several braces are not visible in this
 * extract — confirm against the full file.
 */
263 static int __ipgre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*tpi
,
264 struct ip_tunnel_net
*itn
, int hdr_len
, bool raw_proto
)
266 struct metadata_dst
*tun_dst
= NULL
;
267 const struct iphdr
*iph
;
268 struct ip_tunnel
*tunnel
;
/* Receive-orientation lookup: outer saddr is the remote endpoint. */
271 tunnel
= ip_tunnel_lookup(itn
, skb
->dev
->ifindex
, tpi
->flags
,
272 iph
->saddr
, iph
->daddr
, tpi
->key
);
/* Strip the outer IP + GRE headers before handing the inner packet up. */
275 if (__iptunnel_pull_header(skb
, hdr_len
, tpi
->proto
,
276 raw_proto
, false) < 0)
279 skb_pop_mac_header(skb
);
/* collect_md (flow-based) tunnels: attach per-packet tunnel metadata
 * instead of relying on device configuration.
 */
280 if (tunnel
->collect_md
) {
284 flags
= tpi
->flags
& (TUNNEL_CSUM
| TUNNEL_KEY
);
285 tun_id
= key_to_tunnel_id(tpi
->key
);
286 tun_dst
= ip_tun_rx_dst(skb
, flags
, tun_id
, 0);
/* Metadata dst allocation failed: reject the packet. */
288 return PACKET_REJECT
;
291 ip_tunnel_rcv(tunnel
, skb
, tpi
, tun_dst
, log_ecn_error
);
/* Dispatch a parsed GRE packet to the right per-netns table: gretap
 * first for ETH_P_TEB payloads, then retry against the plain ipgre
 * table in raw mode so collect_md ipgre tunnels also see TEB traffic.
 */
301 static int ipgre_rcv(struct sk_buff
*skb
, const struct tnl_ptk_info
*tpi
,
304 struct net
*net
= dev_net(skb
->dev
);
305 struct ip_tunnel_net
*itn
;
/* First pass: TEB payloads go to the gretap table, else plain GRE. */
308 if (tpi
->proto
== htons(ETH_P_TEB
))
309 itn
= net_generic(net
, gre_tap_net_id
);
311 itn
= net_generic(net
, ipgre_net_id
);
313 res
= __ipgre_rcv(skb
, tpi
, itn
, hdr_len
, false);
/* No gretap match (PACKET_NEXT): retry against the ipgre table with
 * raw_proto=true so metadata-mode ipgre devices can accept TEB too.
 */
314 if (res
== PACKET_NEXT
&& tpi
->proto
== htons(ETH_P_TEB
)) {
315 /* ipgre tunnels in collect metadata mode should receive
316 * also ETH_P_TEB traffic.
318 itn
= net_generic(net
, ipgre_net_id
);
319 res
= __ipgre_rcv(skb
, tpi
, itn
, hdr_len
, true);
/* Protocol-level receive entry for GRE: drop looped-back multicast,
 * parse the GRE header, deliver via ipgre_rcv(), and answer with
 * ICMP port-unreachable when nothing claimed the packet.
 * NOTE(review): the drop/free/return lines between the fragments are
 * missing from this extract.
 */
324 static int gre_rcv(struct sk_buff
*skb
)
326 struct tnl_ptk_info tpi
;
327 bool csum_err
= false;
330 #ifdef CONFIG_NET_IPGRE_BROADCAST
/* Multicast destination: if the route is one of ours (output route),
 * this is our own transmission looped back — drop it.
 */
331 if (ipv4_is_multicast(ip_hdr(skb
)->daddr
)) {
332 /* Looped back packet, drop it! */
333 if (rt_is_output_route(skb_rtable(skb
)))
338 hdr_len
= gre_parse_header(skb
, &tpi
, &csum_err
, htons(ETH_P_IP
));
342 if (ipgre_rcv(skb
, &tpi
, hdr_len
) == PACKET_RCVD
)
/* Nobody wanted it: tell the sender per RFC (port unreachable). */
345 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_PORT_UNREACH
, 0);
/* Shared transmit tail for configured (non-collect_md) GRE devices:
 * bump the sequence number when TUNNEL_SEQ is set, push the GRE
 * header, then hand the packet to the generic IP tunnel transmit.
 * NOTE(review): the o_seqno increment under the TUNNEL_SEQ test is not
 * visible in this extract.
 */
351 static void __gre_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
352 const struct iphdr
*tnl_params
,
355 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
357 if (tunnel
->parms
.o_flags
& TUNNEL_SEQ
)
360 /* Push GRE header. */
361 gre_build_header(skb
, tunnel
->tun_hlen
,
362 tunnel
->parms
.o_flags
, proto
, tunnel
->parms
.o_key
,
363 htonl(tunnel
->o_seqno
));
/* Record the inner protocol for GSO segmentation. */
365 skb_set_inner_protocol(skb
, proto
);
366 ip_tunnel_xmit(skb
, dev
, tnl_params
, tnl_params
->protocol
);
369 static int gre_handle_offloads(struct sk_buff
*skb
, bool csum
)
371 return iptunnel_handle_offloads(skb
, csum
? SKB_GSO_GRE_CSUM
: SKB_GSO_GRE
);
/* Build an IPv4 flow from the per-packet tunnel key and resolve the
 * output route for it. The flowi4 is filled from scratch so stale
 * fields cannot leak into the lookup.
 * NOTE(review): the "struct flowi4 *fl" parameter line is missing from
 * this extract but is implied by every use of fl below.
 */
374 static struct rtable
*gre_get_rt(struct sk_buff
*skb
,
375 struct net_device
*dev
,
377 const struct ip_tunnel_key
*key
)
379 struct net
*net
= dev_net(dev
);
381 memset(fl
, 0, sizeof(*fl
));
382 fl
->daddr
= key
->u
.ipv4
.dst
;
383 fl
->saddr
= key
->u
.ipv4
.src
;
384 fl
->flowi4_tos
= RT_TOS(key
->tos
);
/* The skb's mark participates in policy routing decisions. */
385 fl
->flowi4_mark
= skb
->mark
;
386 fl
->flowi4_proto
= IPPROTO_GRE
;
388 return ip_route_output_key(net
, fl
);
/* Flow-based ("fb" / collect_md) transmit path: everything — endpoints,
 * key, TOS/TTL, DF — comes from the skb's attached tunnel metadata
 * rather than from device configuration.
 * NOTE(review): error labels, rt release, and the dst-cache guard
 * conditions around several fragments are missing from this extract.
 */
391 static void gre_fb_xmit(struct sk_buff
*skb
, struct net_device
*dev
,
394 struct ip_tunnel_info
*tun_info
;
395 const struct ip_tunnel_key
*key
;
396 struct rtable
*rt
= NULL
;
/* Sanity: TX metadata must be present and IPv4. */
404 tun_info
= skb_tunnel_info(skb
);
405 if (unlikely(!tun_info
|| !(tun_info
->mode
& IP_TUNNEL_INFO_TX
) ||
406 ip_tunnel_info_af(tun_info
) != AF_INET
))
409 key
= &tun_info
->key
;
/* Try the per-tunnel-info dst cache before doing a route lookup. */
410 use_cache
= ip_tunnel_dst_cache_usable(skb
, tun_info
);
412 rt
= dst_cache_get_ip4(&tun_info
->dst_cache
, &fl
.saddr
);
414 rt
= gre_get_rt(skb
, dev
, &fl
, key
);
418 dst_cache_set_ip4(&tun_info
->dst_cache
, &rt
->dst
,
/* Header length is determined solely by the per-packet tunnel flags. */
422 tunnel_hlen
= gre_calc_hlen(key
->tun_flags
);
/* Ensure headroom for link-layer + outer IP + GRE headers; expand the
 * head if the skb is too shallow or its header is shared.
 */
424 min_headroom
= LL_RESERVED_SPACE(rt
->dst
.dev
) + rt
->dst
.header_len
425 + tunnel_hlen
+ sizeof(struct iphdr
);
426 if (skb_headroom(skb
) < min_headroom
|| skb_header_cloned(skb
)) {
427 int head_delta
= SKB_DATA_ALIGN(min_headroom
-
430 err
= pskb_expand_head(skb
, max_t(int, head_delta
, 0),
436 /* Push Tunnel header. */
437 if (gre_handle_offloads(skb
, !!(tun_info
->key
.tun_flags
& TUNNEL_CSUM
)))
/* Only CSUM and KEY survive into the GRE header flags. */
440 flags
= tun_info
->key
.tun_flags
& (TUNNEL_CSUM
| TUNNEL_KEY
);
441 gre_build_header(skb
, tunnel_hlen
, flags
, proto
,
442 tunnel_id_to_key(tun_info
->key
.tun_id
), 0);
/* DF bit is driven by per-packet metadata, not device config. */
444 df
= key
->tun_flags
& TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
446 iptunnel_xmit(skb
->sk
, rt
, skb
, fl
.saddr
, key
->u
.ipv4
.dst
, IPPROTO_GRE
,
447 key
->tos
, key
->ttl
, df
, false);
/* Error path: account the drop on the device. */
454 dev
->stats
.tx_dropped
++;
457 static int gre_fill_metadata_dst(struct net_device
*dev
, struct sk_buff
*skb
)
459 struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
463 if (ip_tunnel_info_af(info
) != AF_INET
)
466 rt
= gre_get_rt(skb
, dev
, &fl4
, &info
->key
);
471 info
->key
.u
.ipv4
.src
= fl4
.saddr
;
/* ndo_start_xmit for plain GRE devices. collect_md devices short-
 * circuit into the flow-based path; otherwise the outer IP parameters
 * come either from a header prebuilt by ipgre_header() (when
 * header_ops is set) or from the device's configured parms.
 * NOTE(review): NETDEV_TX return statements and the free_skb label are
 * missing from this extract.
 */
475 static netdev_tx_t
ipgre_xmit(struct sk_buff
*skb
,
476 struct net_device
*dev
)
478 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
479 const struct iphdr
*tnl_params
;
481 if (tunnel
->collect_md
) {
482 gre_fb_xmit(skb
, dev
, skb
->protocol
);
/* header_ops set (broadcast/NOARP mode): the outer IP header was
 * already built at hard_header time and sits at skb->data.
 */
486 if (dev
->header_ops
) {
487 /* Need space for new headers */
488 if (skb_cow_head(skb
, dev
->needed_headroom
-
489 (tunnel
->hlen
+ sizeof(struct iphdr
))))
492 tnl_params
= (const struct iphdr
*)skb
->data
;
494 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
497 skb_pull(skb
, tunnel
->hlen
+ sizeof(struct iphdr
));
498 skb_reset_mac_header(skb
);
500 if (skb_cow_head(skb
, dev
->needed_headroom
))
/* Default case: use the device's configured outer IP parameters. */
503 tnl_params
= &tunnel
->parms
.iph
;
506 if (gre_handle_offloads(skb
, !!(tunnel
->parms
.o_flags
& TUNNEL_CSUM
)))
509 __gre_xmit(skb
, dev
, tnl_params
, skb
->protocol
);
514 dev
->stats
.tx_dropped
++;
/* ndo_start_xmit for gretap (Ethernet-over-GRE) devices: same shape as
 * ipgre_xmit() but the inner protocol is always ETH_P_TEB and the
 * outer parameters always come from the device's configured parms.
 * NOTE(review): NETDEV_TX return statements and the free_skb label are
 * missing from this extract.
 */
518 static netdev_tx_t
gre_tap_xmit(struct sk_buff
*skb
,
519 struct net_device
*dev
)
521 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
523 if (tunnel
->collect_md
) {
524 gre_fb_xmit(skb
, dev
, htons(ETH_P_TEB
));
528 if (gre_handle_offloads(skb
, !!(tunnel
->parms
.o_flags
& TUNNEL_CSUM
)))
531 if (skb_cow_head(skb
, dev
->needed_headroom
))
534 __gre_xmit(skb
, dev
, &tunnel
->parms
.iph
, htons(ETH_P_TEB
));
539 dev
->stats
.tx_dropped
++;
/* Legacy SIOC{ADD,CHG,DEL,GET}TUNNEL ioctl handler: copy the parms
 * from userspace, validate, translate between on-wire GRE_* flag
 * encoding and internal TUNNEL_* flags in both directions, and copy
 * the (possibly updated) parms back.
 * NOTE(review): -EFAULT/-EINVAL returns between the fragments are
 * missing from this extract.
 */
543 static int ipgre_tunnel_ioctl(struct net_device
*dev
,
544 struct ifreq
*ifr
, int cmd
)
547 struct ip_tunnel_parm p
;
549 if (copy_from_user(&p
, ifr
->ifr_ifru
.ifru_data
, sizeof(p
)))
/* Only ADD/CHG need validation: the outer header must be plain IPv4
 * (version 4, ihl 5, proto GRE), frag_off may carry nothing but DF,
 * and the GRE version/routing bits are unsupported.
 */
551 if (cmd
== SIOCADDTUNNEL
|| cmd
== SIOCCHGTUNNEL
) {
552 if (p
.iph
.version
!= 4 || p
.iph
.protocol
!= IPPROTO_GRE
||
553 p
.iph
.ihl
!= 5 || (p
.iph
.frag_off
&htons(~IP_DF
)) ||
554 ((p
.i_flags
|p
.o_flags
)&(GRE_VERSION
|GRE_ROUTING
)))
/* Userspace speaks on-wire GRE_* bits; convert to TUNNEL_* flags. */
557 p
.i_flags
= gre_flags_to_tnl_flags(p
.i_flags
);
558 p
.o_flags
= gre_flags_to_tnl_flags(p
.o_flags
);
560 err
= ip_tunnel_ioctl(dev
, &p
, cmd
);
/* Convert back before reporting the result to userspace. */
564 p
.i_flags
= gre_tnl_flags_to_gre_flags(p
.i_flags
);
565 p
.o_flags
= gre_tnl_flags_to_gre_flags(p
.o_flags
);
567 if (copy_to_user(ifr
->ifr_ifru
.ifru_data
, &p
, sizeof(p
)))
572 /* Nice toy. Unfortunately, useless in real life :-)
573 It allows to construct virtual multiprotocol broadcast "LAN"
574 over the Internet, provided multicast routing is tuned.
577 I have no idea whether this bicycle was invented before me,
578 so that I had to set ARPHRD_IPGRE to a random value.
579 I have an impression, that Cisco could make something similar,
580 but this feature is apparently missing in IOS<=11.2(8).
582 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
583 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
585 ping -t 255 224.66.66.66
587 If nobody answers, mbone does not work.
589 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
590 ip addr add 10.66.66.<somewhat>/24 dev Universe
592 ifconfig Universe add fe80::<Your_real_addr>/10
593 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
596 ftp fec0:6666:6666::193.233.7.65
599 static int ipgre_header(struct sk_buff
*skb
, struct net_device
*dev
,
601 const void *daddr
, const void *saddr
, unsigned int len
)
603 struct ip_tunnel
*t
= netdev_priv(dev
);
605 struct gre_base_hdr
*greh
;
607 iph
= (struct iphdr
*)skb_push(skb
, t
->hlen
+ sizeof(*iph
));
608 greh
= (struct gre_base_hdr
*)(iph
+1);
609 greh
->flags
= gre_tnl_flags_to_gre_flags(t
->parms
.o_flags
);
610 greh
->protocol
= htons(type
);
612 memcpy(iph
, &t
->parms
.iph
, sizeof(struct iphdr
));
614 /* Set the source hardware address. */
616 memcpy(&iph
->saddr
, saddr
, 4);
618 memcpy(&iph
->daddr
, daddr
, 4);
620 return t
->hlen
+ sizeof(*iph
);
622 return -(t
->hlen
+ sizeof(*iph
));
625 static int ipgre_header_parse(const struct sk_buff
*skb
, unsigned char *haddr
)
627 const struct iphdr
*iph
= (const struct iphdr
*) skb_mac_header(skb
);
628 memcpy(haddr
, &iph
->saddr
, 4);
632 static const struct header_ops ipgre_header_ops
= {
633 .create
= ipgre_header
,
634 .parse
= ipgre_header_parse
,
637 #ifdef CONFIG_NET_IPGRE_BROADCAST
638 static int ipgre_open(struct net_device
*dev
)
640 struct ip_tunnel
*t
= netdev_priv(dev
);
642 if (ipv4_is_multicast(t
->parms
.iph
.daddr
)) {
646 rt
= ip_route_output_gre(t
->net
, &fl4
,
650 RT_TOS(t
->parms
.iph
.tos
),
653 return -EADDRNOTAVAIL
;
656 if (!__in_dev_get_rtnl(dev
))
657 return -EADDRNOTAVAIL
;
658 t
->mlink
= dev
->ifindex
;
659 ip_mc_inc_group(__in_dev_get_rtnl(dev
), t
->parms
.iph
.daddr
);
664 static int ipgre_close(struct net_device
*dev
)
666 struct ip_tunnel
*t
= netdev_priv(dev
);
668 if (ipv4_is_multicast(t
->parms
.iph
.daddr
) && t
->mlink
) {
669 struct in_device
*in_dev
;
670 in_dev
= inetdev_by_index(t
->net
, t
->mlink
);
672 ip_mc_dec_group(in_dev
, t
->parms
.iph
.daddr
);
678 static const struct net_device_ops ipgre_netdev_ops
= {
679 .ndo_init
= ipgre_tunnel_init
,
680 .ndo_uninit
= ip_tunnel_uninit
,
681 #ifdef CONFIG_NET_IPGRE_BROADCAST
682 .ndo_open
= ipgre_open
,
683 .ndo_stop
= ipgre_close
,
685 .ndo_start_xmit
= ipgre_xmit
,
686 .ndo_do_ioctl
= ipgre_tunnel_ioctl
,
687 .ndo_change_mtu
= ip_tunnel_change_mtu
,
688 .ndo_get_stats64
= ip_tunnel_get_stats64
,
689 .ndo_get_iflink
= ip_tunnel_get_iflink
,
692 #define GRE_FEATURES (NETIF_F_SG | \
697 static void ipgre_tunnel_setup(struct net_device
*dev
)
699 dev
->netdev_ops
= &ipgre_netdev_ops
;
700 dev
->type
= ARPHRD_IPGRE
;
701 ip_tunnel_setup(dev
, ipgre_net_id
);
704 static void __gre_tunnel_init(struct net_device
*dev
)
706 struct ip_tunnel
*tunnel
;
709 tunnel
= netdev_priv(dev
);
710 tunnel
->tun_hlen
= gre_calc_hlen(tunnel
->parms
.o_flags
);
711 tunnel
->parms
.iph
.protocol
= IPPROTO_GRE
;
713 tunnel
->hlen
= tunnel
->tun_hlen
+ tunnel
->encap_hlen
;
715 t_hlen
= tunnel
->hlen
+ sizeof(struct iphdr
);
717 dev
->needed_headroom
= LL_MAX_HEADER
+ t_hlen
+ 4;
718 dev
->mtu
= ETH_DATA_LEN
- t_hlen
- 4;
720 dev
->features
|= GRE_FEATURES
;
721 dev
->hw_features
|= GRE_FEATURES
;
723 if (!(tunnel
->parms
.o_flags
& TUNNEL_SEQ
)) {
724 /* TCP offload with GRE SEQ is not supported, nor
725 * can we support 2 levels of outer headers requiring
728 if (!(tunnel
->parms
.o_flags
& TUNNEL_CSUM
) ||
729 (tunnel
->encap
.type
== TUNNEL_ENCAP_NONE
)) {
730 dev
->features
|= NETIF_F_GSO_SOFTWARE
;
731 dev
->hw_features
|= NETIF_F_GSO_SOFTWARE
;
734 /* Can use a lockless transmit, unless we generate
737 dev
->features
|= NETIF_F_LLTX
;
741 static int ipgre_tunnel_init(struct net_device
*dev
)
743 struct ip_tunnel
*tunnel
= netdev_priv(dev
);
744 struct iphdr
*iph
= &tunnel
->parms
.iph
;
746 __gre_tunnel_init(dev
);
748 memcpy(dev
->dev_addr
, &iph
->saddr
, 4);
749 memcpy(dev
->broadcast
, &iph
->daddr
, 4);
751 dev
->flags
= IFF_NOARP
;
755 if (iph
->daddr
&& !tunnel
->collect_md
) {
756 #ifdef CONFIG_NET_IPGRE_BROADCAST
757 if (ipv4_is_multicast(iph
->daddr
)) {
760 dev
->flags
= IFF_BROADCAST
;
761 dev
->header_ops
= &ipgre_header_ops
;
764 } else if (!tunnel
->collect_md
) {
765 dev
->header_ops
= &ipgre_header_ops
;
768 return ip_tunnel_init(dev
);
771 static const struct gre_protocol ipgre_protocol
= {
773 .err_handler
= gre_err
,
776 static int __net_init
ipgre_init_net(struct net
*net
)
778 return ip_tunnel_init_net(net
, ipgre_net_id
, &ipgre_link_ops
, NULL
);
781 static void __net_exit
ipgre_exit_net(struct net
*net
)
783 struct ip_tunnel_net
*itn
= net_generic(net
, ipgre_net_id
);
784 ip_tunnel_delete_net(itn
, &ipgre_link_ops
);
787 static struct pernet_operations ipgre_net_ops
= {
788 .init
= ipgre_init_net
,
789 .exit
= ipgre_exit_net
,
791 .size
= sizeof(struct ip_tunnel_net
),
794 static int ipgre_tunnel_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
802 if (data
[IFLA_GRE_IFLAGS
])
803 flags
|= nla_get_be16(data
[IFLA_GRE_IFLAGS
]);
804 if (data
[IFLA_GRE_OFLAGS
])
805 flags
|= nla_get_be16(data
[IFLA_GRE_OFLAGS
]);
806 if (flags
& (GRE_VERSION
|GRE_ROUTING
))
809 if (data
[IFLA_GRE_COLLECT_METADATA
] &&
810 data
[IFLA_GRE_ENCAP_TYPE
] &&
811 nla_get_u16(data
[IFLA_GRE_ENCAP_TYPE
]) != TUNNEL_ENCAP_NONE
)
817 static int ipgre_tap_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
821 if (tb
[IFLA_ADDRESS
]) {
822 if (nla_len(tb
[IFLA_ADDRESS
]) != ETH_ALEN
)
824 if (!is_valid_ether_addr(nla_data(tb
[IFLA_ADDRESS
])))
825 return -EADDRNOTAVAIL
;
831 if (data
[IFLA_GRE_REMOTE
]) {
832 memcpy(&daddr
, nla_data(data
[IFLA_GRE_REMOTE
]), 4);
838 return ipgre_tunnel_validate(tb
, data
);
841 static void ipgre_netlink_parms(struct net_device
*dev
,
842 struct nlattr
*data
[],
844 struct ip_tunnel_parm
*parms
)
846 memset(parms
, 0, sizeof(*parms
));
848 parms
->iph
.protocol
= IPPROTO_GRE
;
853 if (data
[IFLA_GRE_LINK
])
854 parms
->link
= nla_get_u32(data
[IFLA_GRE_LINK
]);
856 if (data
[IFLA_GRE_IFLAGS
])
857 parms
->i_flags
= gre_flags_to_tnl_flags(nla_get_be16(data
[IFLA_GRE_IFLAGS
]));
859 if (data
[IFLA_GRE_OFLAGS
])
860 parms
->o_flags
= gre_flags_to_tnl_flags(nla_get_be16(data
[IFLA_GRE_OFLAGS
]));
862 if (data
[IFLA_GRE_IKEY
])
863 parms
->i_key
= nla_get_be32(data
[IFLA_GRE_IKEY
]);
865 if (data
[IFLA_GRE_OKEY
])
866 parms
->o_key
= nla_get_be32(data
[IFLA_GRE_OKEY
]);
868 if (data
[IFLA_GRE_LOCAL
])
869 parms
->iph
.saddr
= nla_get_in_addr(data
[IFLA_GRE_LOCAL
]);
871 if (data
[IFLA_GRE_REMOTE
])
872 parms
->iph
.daddr
= nla_get_in_addr(data
[IFLA_GRE_REMOTE
]);
874 if (data
[IFLA_GRE_TTL
])
875 parms
->iph
.ttl
= nla_get_u8(data
[IFLA_GRE_TTL
]);
877 if (data
[IFLA_GRE_TOS
])
878 parms
->iph
.tos
= nla_get_u8(data
[IFLA_GRE_TOS
]);
880 if (!data
[IFLA_GRE_PMTUDISC
] || nla_get_u8(data
[IFLA_GRE_PMTUDISC
]))
881 parms
->iph
.frag_off
= htons(IP_DF
);
883 if (data
[IFLA_GRE_COLLECT_METADATA
]) {
884 struct ip_tunnel
*t
= netdev_priv(dev
);
886 t
->collect_md
= true;
890 /* This function returns true when ENCAP attributes are present in the nl msg */
891 static bool ipgre_netlink_encap_parms(struct nlattr
*data
[],
892 struct ip_tunnel_encap
*ipencap
)
896 memset(ipencap
, 0, sizeof(*ipencap
));
901 if (data
[IFLA_GRE_ENCAP_TYPE
]) {
903 ipencap
->type
= nla_get_u16(data
[IFLA_GRE_ENCAP_TYPE
]);
906 if (data
[IFLA_GRE_ENCAP_FLAGS
]) {
908 ipencap
->flags
= nla_get_u16(data
[IFLA_GRE_ENCAP_FLAGS
]);
911 if (data
[IFLA_GRE_ENCAP_SPORT
]) {
913 ipencap
->sport
= nla_get_be16(data
[IFLA_GRE_ENCAP_SPORT
]);
916 if (data
[IFLA_GRE_ENCAP_DPORT
]) {
918 ipencap
->dport
= nla_get_be16(data
[IFLA_GRE_ENCAP_DPORT
]);
924 static int gre_tap_init(struct net_device
*dev
)
926 __gre_tunnel_init(dev
);
927 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
929 return ip_tunnel_init(dev
);
932 static const struct net_device_ops gre_tap_netdev_ops
= {
933 .ndo_init
= gre_tap_init
,
934 .ndo_uninit
= ip_tunnel_uninit
,
935 .ndo_start_xmit
= gre_tap_xmit
,
936 .ndo_set_mac_address
= eth_mac_addr
,
937 .ndo_validate_addr
= eth_validate_addr
,
938 .ndo_change_mtu
= ip_tunnel_change_mtu
,
939 .ndo_get_stats64
= ip_tunnel_get_stats64
,
940 .ndo_get_iflink
= ip_tunnel_get_iflink
,
941 .ndo_fill_metadata_dst
= gre_fill_metadata_dst
,
944 static void ipgre_tap_setup(struct net_device
*dev
)
947 dev
->netdev_ops
= &gre_tap_netdev_ops
;
948 dev
->priv_flags
&= ~IFF_TX_SKB_SHARING
;
949 dev
->priv_flags
|= IFF_LIVE_ADDR_CHANGE
;
950 ip_tunnel_setup(dev
, gre_tap_net_id
);
953 static int ipgre_newlink(struct net
*src_net
, struct net_device
*dev
,
954 struct nlattr
*tb
[], struct nlattr
*data
[])
956 struct ip_tunnel_parm p
;
957 struct ip_tunnel_encap ipencap
;
959 if (ipgre_netlink_encap_parms(data
, &ipencap
)) {
960 struct ip_tunnel
*t
= netdev_priv(dev
);
961 int err
= ip_tunnel_encap_setup(t
, &ipencap
);
967 ipgre_netlink_parms(dev
, data
, tb
, &p
);
968 return ip_tunnel_newlink(dev
, tb
, &p
);
971 static int ipgre_changelink(struct net_device
*dev
, struct nlattr
*tb
[],
972 struct nlattr
*data
[])
974 struct ip_tunnel_parm p
;
975 struct ip_tunnel_encap ipencap
;
977 if (ipgre_netlink_encap_parms(data
, &ipencap
)) {
978 struct ip_tunnel
*t
= netdev_priv(dev
);
979 int err
= ip_tunnel_encap_setup(t
, &ipencap
);
985 ipgre_netlink_parms(dev
, data
, tb
, &p
);
986 return ip_tunnel_changelink(dev
, tb
, &p
);
989 static size_t ipgre_get_size(const struct net_device
*dev
)
994 /* IFLA_GRE_IFLAGS */
996 /* IFLA_GRE_OFLAGS */
1002 /* IFLA_GRE_LOCAL */
1004 /* IFLA_GRE_REMOTE */
1010 /* IFLA_GRE_PMTUDISC */
1012 /* IFLA_GRE_ENCAP_TYPE */
1014 /* IFLA_GRE_ENCAP_FLAGS */
1016 /* IFLA_GRE_ENCAP_SPORT */
1018 /* IFLA_GRE_ENCAP_DPORT */
1020 /* IFLA_GRE_COLLECT_METADATA */
1025 static int ipgre_fill_info(struct sk_buff
*skb
, const struct net_device
*dev
)
1027 struct ip_tunnel
*t
= netdev_priv(dev
);
1028 struct ip_tunnel_parm
*p
= &t
->parms
;
1030 if (nla_put_u32(skb
, IFLA_GRE_LINK
, p
->link
) ||
1031 nla_put_be16(skb
, IFLA_GRE_IFLAGS
,
1032 gre_tnl_flags_to_gre_flags(p
->i_flags
)) ||
1033 nla_put_be16(skb
, IFLA_GRE_OFLAGS
,
1034 gre_tnl_flags_to_gre_flags(p
->o_flags
)) ||
1035 nla_put_be32(skb
, IFLA_GRE_IKEY
, p
->i_key
) ||
1036 nla_put_be32(skb
, IFLA_GRE_OKEY
, p
->o_key
) ||
1037 nla_put_in_addr(skb
, IFLA_GRE_LOCAL
, p
->iph
.saddr
) ||
1038 nla_put_in_addr(skb
, IFLA_GRE_REMOTE
, p
->iph
.daddr
) ||
1039 nla_put_u8(skb
, IFLA_GRE_TTL
, p
->iph
.ttl
) ||
1040 nla_put_u8(skb
, IFLA_GRE_TOS
, p
->iph
.tos
) ||
1041 nla_put_u8(skb
, IFLA_GRE_PMTUDISC
,
1042 !!(p
->iph
.frag_off
& htons(IP_DF
))))
1043 goto nla_put_failure
;
1045 if (nla_put_u16(skb
, IFLA_GRE_ENCAP_TYPE
,
1047 nla_put_be16(skb
, IFLA_GRE_ENCAP_SPORT
,
1049 nla_put_be16(skb
, IFLA_GRE_ENCAP_DPORT
,
1051 nla_put_u16(skb
, IFLA_GRE_ENCAP_FLAGS
,
1053 goto nla_put_failure
;
1055 if (t
->collect_md
) {
1056 if (nla_put_flag(skb
, IFLA_GRE_COLLECT_METADATA
))
1057 goto nla_put_failure
;
1066 static const struct nla_policy ipgre_policy
[IFLA_GRE_MAX
+ 1] = {
1067 [IFLA_GRE_LINK
] = { .type
= NLA_U32
},
1068 [IFLA_GRE_IFLAGS
] = { .type
= NLA_U16
},
1069 [IFLA_GRE_OFLAGS
] = { .type
= NLA_U16
},
1070 [IFLA_GRE_IKEY
] = { .type
= NLA_U32
},
1071 [IFLA_GRE_OKEY
] = { .type
= NLA_U32
},
1072 [IFLA_GRE_LOCAL
] = { .len
= FIELD_SIZEOF(struct iphdr
, saddr
) },
1073 [IFLA_GRE_REMOTE
] = { .len
= FIELD_SIZEOF(struct iphdr
, daddr
) },
1074 [IFLA_GRE_TTL
] = { .type
= NLA_U8
},
1075 [IFLA_GRE_TOS
] = { .type
= NLA_U8
},
1076 [IFLA_GRE_PMTUDISC
] = { .type
= NLA_U8
},
1077 [IFLA_GRE_ENCAP_TYPE
] = { .type
= NLA_U16
},
1078 [IFLA_GRE_ENCAP_FLAGS
] = { .type
= NLA_U16
},
1079 [IFLA_GRE_ENCAP_SPORT
] = { .type
= NLA_U16
},
1080 [IFLA_GRE_ENCAP_DPORT
] = { .type
= NLA_U16
},
1081 [IFLA_GRE_COLLECT_METADATA
] = { .type
= NLA_FLAG
},
1084 static struct rtnl_link_ops ipgre_link_ops __read_mostly
= {
1086 .maxtype
= IFLA_GRE_MAX
,
1087 .policy
= ipgre_policy
,
1088 .priv_size
= sizeof(struct ip_tunnel
),
1089 .setup
= ipgre_tunnel_setup
,
1090 .validate
= ipgre_tunnel_validate
,
1091 .newlink
= ipgre_newlink
,
1092 .changelink
= ipgre_changelink
,
1093 .dellink
= ip_tunnel_dellink
,
1094 .get_size
= ipgre_get_size
,
1095 .fill_info
= ipgre_fill_info
,
1096 .get_link_net
= ip_tunnel_get_link_net
,
1099 static struct rtnl_link_ops ipgre_tap_ops __read_mostly
= {
1101 .maxtype
= IFLA_GRE_MAX
,
1102 .policy
= ipgre_policy
,
1103 .priv_size
= sizeof(struct ip_tunnel
),
1104 .setup
= ipgre_tap_setup
,
1105 .validate
= ipgre_tap_validate
,
1106 .newlink
= ipgre_newlink
,
1107 .changelink
= ipgre_changelink
,
1108 .dellink
= ip_tunnel_dellink
,
1109 .get_size
= ipgre_get_size
,
1110 .fill_info
= ipgre_fill_info
,
1111 .get_link_net
= ip_tunnel_get_link_net
,
/* Create a flow-based (collect_md) gretap device programmatically, for
 * in-kernel users such as openvswitch. Returns the net_device or an
 * ERR_PTR on failure (see ERR_PTR(err) below).
 * NOTE(review): the error-checking lines after rtnl_create_link() and
 * ipgre_newlink(), and the success return, are missing from this
 * extract.
 */
1114 struct net_device
*gretap_fb_dev_create(struct net
*net
, const char *name
,
1115 u8 name_assign_type
)
1117 struct nlattr
*tb
[IFLA_MAX
+ 1];
1118 struct net_device
*dev
;
1119 struct ip_tunnel
*t
;
/* No netlink attributes: all-zero tb means "defaults" to newlink. */
1122 memset(&tb
, 0, sizeof(tb
));
1124 dev
= rtnl_create_link(net
, name
, name_assign_type
,
1125 &ipgre_tap_ops
, tb
);
1129 /* Configure flow based GRE device. */
1130 t
= netdev_priv(dev
);
1131 t
->collect_md
= true;
1133 err
= ipgre_newlink(net
, dev
, tb
, NULL
);
1137 /* openvswitch users expect packet sizes to be unrestricted,
1138 * so set the largest MTU we can.
1140 err
= __ip_tunnel_change_mtu(dev
, IP_MAX_MTU
, false);
1147 return ERR_PTR(err
);
1149 EXPORT_SYMBOL_GPL(gretap_fb_dev_create
);
1151 static int __net_init
ipgre_tap_init_net(struct net
*net
)
1153 return ip_tunnel_init_net(net
, gre_tap_net_id
, &ipgre_tap_ops
, "gretap0");
1156 static void __net_exit
ipgre_tap_exit_net(struct net
*net
)
1158 struct ip_tunnel_net
*itn
= net_generic(net
, gre_tap_net_id
);
1159 ip_tunnel_delete_net(itn
, &ipgre_tap_ops
);
1162 static struct pernet_operations ipgre_tap_net_ops
= {
1163 .init
= ipgre_tap_init_net
,
1164 .exit
= ipgre_tap_exit_net
,
1165 .id
= &gre_tap_net_id
,
1166 .size
= sizeof(struct ip_tunnel_net
),
/* Module init: register both per-netns ops, hook into the GRE
 * demultiplexer (GREPROTO_CISCO), then register both rtnl link ops.
 * On failure, unwinds in reverse order via the goto ladder.
 * NOTE(review): the label "pnet_tap_faied" is a typo for
 * "pnet_tap_failed"; fixing it requires also renaming the label
 * definition, which is not visible in this extract — fix both together.
 */
1169 static int __init
ipgre_init(void)
1173 pr_info("GRE over IPv4 tunneling driver\n");
1175 err
= register_pernet_device(&ipgre_net_ops
);
1179 err
= register_pernet_device(&ipgre_tap_net_ops
);
1181 goto pnet_tap_faied
;
1183 err
= gre_add_protocol(&ipgre_protocol
, GREPROTO_CISCO
);
1185 pr_info("%s: can't add protocol\n", __func__
);
1186 goto add_proto_failed
;
1189 err
= rtnl_link_register(&ipgre_link_ops
);
1191 goto rtnl_link_failed
;
1193 err
= rtnl_link_register(&ipgre_tap_ops
);
1195 goto tap_ops_failed
;
/* Error unwind: undo each successful step in reverse order. */
1200 rtnl_link_unregister(&ipgre_link_ops
);
1202 gre_del_protocol(&ipgre_protocol
, GREPROTO_CISCO
);
1204 unregister_pernet_device(&ipgre_tap_net_ops
);
1206 unregister_pernet_device(&ipgre_net_ops
);
/* Module exit: tear everything down in the exact reverse order of
 * ipgre_init() — link ops first, then the GRE protocol hook, then the
 * per-netns registrations.
 */
1210 static void __exit
ipgre_fini(void)
1212 rtnl_link_unregister(&ipgre_tap_ops
);
1213 rtnl_link_unregister(&ipgre_link_ops
);
1214 gre_del_protocol(&ipgre_protocol
, GREPROTO_CISCO
);
1215 unregister_pernet_device(&ipgre_tap_net_ops
);
1216 unregister_pernet_device(&ipgre_net_ops
);
1219 module_init(ipgre_init
);
1220 module_exit(ipgre_fini
);
1221 MODULE_LICENSE("GPL");
1222 MODULE_ALIAS_RTNL_LINK("gre");
1223 MODULE_ALIAS_RTNL_LINK("gretap");
1224 MODULE_ALIAS_NETDEV("gre0");
1225 MODULE_ALIAS_NETDEV("gretap0");