/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new field in EVERY
   skb, even when no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since CPU migration is forbidden once we enter the first
   ndo_xmit(), a per-cpu count of nested transmissions is reliable.
   We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the outer header. It is a very good solution,
   but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from inside
     the tunnel, so that this problem would be solved and traceroute
     output would be even more informative. This idea turned out to be
     wrong: only Linux complies with rfc1812 now (yes, guys, Linux is
     the only true router now :-)); all other routers (at least those
     in my neighbourhood) return only 8 bytes of payload. That is the
     end of it.

   Hence, if we want OSPF to work and traceroute to say something
   reasonable, we must search for another solution.

   One option is to parse the packet, trying to detect an inner
   encapsulation made by our own node. That is difficult or even
   impossible, especially once fragmentation is taken into account.
   In short, ttl is no solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem completely,
   but the exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there is a router in the loop which does not
   force DF even when the encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, while ipip and gre are naturally
   modular. We could extract the common parts (hash table, ioctl etc.)
   into a separate module (ip_tunnel.c).
 */
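/* For illustration (added here for clarity, not part of the original
 * text): with the DF flag forced, each extra level of
 * self-encapsulation shrinks the usable path MTU by the tunnel
 * overhead (20-byte outer IP header + 4-byte basic GRE header =
 * 24 bytes), so a loop cannot amplify traffic forever. Assuming a
 * 1500-byte physical MTU, a rough sketch of the decay:
 *
 *	depth 0: 1500
 *	depth 1: 1500 - 24 = 1476
 *	depth 2: 1476 - 24 = 1452
 *	...
 *	depth ~60: below 68 (the minimal IPv4 MTU), where looping stops.
 */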
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);
/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE	16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
        struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

        struct net_device *fb_tunnel_dev;
};
/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if no key is present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched to a configured keyless tunnel,
   will match the fallback tunnel.
 */
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
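/* For illustration (not in the original source): HASH() folds a 32-bit
 * value into a 4-bit bucket index by xoring the value with itself
 * shifted right by 4 and keeping the low nibble. E.g. for the raw
 * 32-bit pattern 0xc0a80001 (192.168.0.1 in one byte order):
 *
 *	0xc0a80001 ^ (0xc0a80001 >> 4) = 0xc0a80001 ^ 0x0c0a8000
 *	                               = 0xcca28001, & 0xF -> bucket 1
 *
 * The exact bucket depends on byte order; the point is only that both
 * the remote address and the key are folded into 16 buckets.
 */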
static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
                                                   struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;
        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;

        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        return tot;
}
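/* Note added for clarity (not in the original source): the do/while
 * above is the standard u64_stats seqcount reader pattern. On 32-bit
 * hosts a 64-bit counter cannot be read atomically, so the reader
 * snapshots the counters between fetch_begin and fetch_retry and
 * simply re-reads if a writer bumped the sequence number in between;
 * on 64-bit hosts the calls compile away. Example: if a softirq
 * increments tstats->rx_packets while we are copying, fetch_retry
 * returns true and the snapshot is taken again.
 */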
/* Does the key in the tunnel parameters match the packet? */
static bool ipgre_key_match(const struct ip_tunnel_parm *p,
                            __be16 flags, __be32 key)
{
        if (p->i_flags & GRE_KEY) {
                if (flags & GRE_KEY)
                        return key == p->i_key;
                return false;	/* key expected, none present */
        }
        return !(flags & GRE_KEY);
}
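/* For illustration (not in the original source), the match is symmetric
 * in the presence of a key:
 *
 *	tunnel keyed?	packet keyed?	result
 *	yes		yes		match iff keys are equal
 *	yes		no		no match
 *	no		yes		no match
 *	no		no		match
 */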
/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
                                             __be32 remote, __be32 local,
                                             __be16 flags, __be32 key,
                                             __be16 gre_proto)
{
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
        unsigned int h0 = HASH(remote);
        unsigned int h1 = HASH(key);
        struct ip_tunnel *t, *cand = NULL;
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
        int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
                       ARPHRD_ETHER : ARPHRD_IPGRE;
        int score, cand_score = 4;

        for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ipgre_key_match(&t->parms, flags, key))
                        continue;

                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
                if (remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ipgre_key_match(&t->parms, flags, key))
                        continue;

                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
                if ((local != t->parms.iph.saddr &&
                     (local != t->parms.iph.daddr ||
                      !ipv4_is_multicast(local))) ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (!ipgre_key_match(&t->parms, flags, key))
                        continue;

                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
                if (t->parms.i_key != key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        if (cand != NULL)
                return cand;

        dev = ign->fb_tunnel_dev;
        if (dev->flags & IFF_UP)
                return netdev_priv(dev);

        return NULL;
}
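/* Illustrative summary (not in the original source): the four loops
 * above implement a precedence order — (remote,local), (remote,*),
 * (*,local), (*,*) — and within each bucket a small score prefers
 * tunnels bound to the arriving link and of the exact device type:
 *
 *	score 0: link and type both match (returned immediately)
 *	score 1: type matches, link differs
 *	score 2: link matches, type differs
 *	score 3: neither matches
 *
 * The best-scoring candidate wins; the fallback device catches the rest.
 */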
static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
                                               struct ip_tunnel_parm *parms)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        unsigned int h = HASH(key);
        int prio = 0;

        if (local)
                prio |= 1;
        if (remote && !ipv4_is_multicast(remote)) {
                prio |= 2;
                h ^= HASH(remote);
        }

        return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
                                                    struct ip_tunnel *t)
{
        return __ipgre_bucket(ign, &t->parms);
}
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

        rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
        struct ip_tunnel __rcu **tp;
        struct ip_tunnel *iter;

        for (tp = ipgre_bucket(ign, t);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
                if (t == iter) {
                        rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
}
static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
                                           struct ip_tunnel_parm *parms,
                                           int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip_tunnel *t;
        struct ip_tunnel __rcu **tp;
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);

        for (tp = __ipgre_bucket(ign, parms);
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next)
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;

        return t;
}
static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
                                             struct ip_tunnel_parm *parms,
                                             int create)
{
        struct ip_tunnel *t, *nt;
        struct net_device *dev;
        char name[IFNAMSIZ];
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);

        t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
        if (t || !create)
                return t;

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else
                strcpy(name, "gre%d");

        dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        nt = netdev_priv(dev);
        nt->parms = *parms;
        dev->rtnl_link_ops = &ipgre_link_ops;

        dev->mtu = ipgre_tunnel_bind_dev(dev);

        if (register_netdevice(dev) < 0)
                goto failed_free;

        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & GRE_SEQ))
                dev->features |= NETIF_F_LLTX;

        dev_hold(dev);
        ipgre_tunnel_link(ign, nt);
        return nt;

failed_free:
        free_netdev(dev);
        return NULL;
}
static void ipgre_tunnel_uninit(struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);

        ipgre_tunnel_unlink(ign, netdev_priv(dev));
        dev_put(dev);
}
static void ipgre_err(struct sk_buff *skb, u32 info)
{
/* All routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco's "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with checksums enabled. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so
   what the hell makes these idiots break the standards established
   by themselves???
 */
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
        int grehlen = (iph->ihl<<2) + 4;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        __be16 flags;
        __be32 key = 0;

        flags = p[0];
        if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
                if (flags&(GRE_VERSION|GRE_ROUTING))
                        return;
                if (flags&GRE_KEY) {
                        grehlen += 4;
                        if (flags&GRE_CSUM)
                                grehlen += 4;
                }
        }

        /* If only 8 bytes returned, keyed message will be dropped here */
        if (skb_headlen(skb) < grehlen)
                return;

        if (flags & GRE_KEY)
                key = *(((__be32 *)p) + (grehlen / 4) - 1);

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH;
                           I believe they are just ether pollution. --ANK
                         */
                        break;
                }
                break;

        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return;
                break;

        case ICMP_REDIRECT:
                break;
        }

        t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
                                flags, key, p[1]);
        if (t == NULL)
                return;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 t->parms.link, 0, IPPROTO_GRE, 0);
                return;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_GRE, 0);
                return;
        }
        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
                return;

        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                return;

        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
}
static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
        u8 inner = 0;

        if (skb->protocol == htons(ETH_P_IP))
                inner = old_iph->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
        return INET_ECN_encapsulate(tos, inner);
}
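/* For reference (added here for clarity; see RFC 1701/2784 for the
 * authoritative layout), the basic GRE header that ipgre_rcv() below
 * parses looks roughly like this, with the optional words present only
 * when the corresponding flag bit is set:
 *
 *	+-------------------------------+-------------------------------+
 *	|C| |K|S|  Reserved       | Ver |        Protocol Type          |
 *	+-------------------------------+-------------------------------+
 *	|        Checksum (if C)        |        Reserved (if C)        |
 *	+-------------------------------+-------------------------------+
 *	|                           Key (if K)                          |
 *	+---------------------------------------------------------------+
 *	|                    Sequence Number (if S)                     |
 *	+---------------------------------------------------------------+
 *
 * This is why the receive path starts at offset 4 (flags + protocol)
 * and adds 4 bytes per present option, in checksum/key/seq order.
 */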
static int ipgre_rcv(struct sk_buff *skb)
{
        const struct iphdr *iph;
        u8     *h;
        __be16 flags;
        __sum16 csum = 0;
        __be32 key = 0;
        u32    seqno = 0;
        struct ip_tunnel *tunnel;
        int    offset = 4;
        __be16 gre_proto;
        int    err;

        if (!pskb_may_pull(skb, 16))
                goto drop;

        iph = ip_hdr(skb);
        h = skb->data;
        flags = *(__be16 *)h;

        if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
                /* - Version must be 0.
                   - We do not support routing headers.
                 */
                if (flags&(GRE_VERSION|GRE_ROUTING))
                        goto drop;

                if (flags&GRE_CSUM) {
                        switch (skb->ip_summed) {
                        case CHECKSUM_COMPLETE:
                                csum = csum_fold(skb->csum);
                                if (!csum)
                                        break;
                                /* fall through */
                        case CHECKSUM_NONE:
                                skb->csum = 0;
                                csum = __skb_checksum_complete(skb);
                                skb->ip_summed = CHECKSUM_COMPLETE;
                        }
                        offset += 4;
                }
                if (flags&GRE_KEY) {
                        key = *(__be32 *)(h + offset);
                        offset += 4;
                }
                if (flags&GRE_SEQ) {
                        seqno = ntohl(*(__be32 *)(h + offset));
                        offset += 4;
                }
        }

        gre_proto = *(__be16 *)(h + 2);

        tunnel = ipgre_tunnel_lookup(skb->dev,
                                     iph->saddr, iph->daddr, flags, key,
                                     gre_proto);
        if (tunnel) {
                struct pcpu_tstats *tstats;

                secpath_reset(skb);

                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
                 * - Change protocol to IP
                 * - When dealing with WCCPv2, skip the extra 4 bytes in
                 *   the GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
                        skb->protocol = htons(ETH_P_IP);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }

                skb->mac_header = skb->network_header;
                __pskb_pull(skb, offset);
                skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
                skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        /* Looped back packet, drop it! */
                        if (rt_is_output_route(skb_rtable(skb)))
                                goto drop;
                        tunnel->dev->stats.multicast++;
                        skb->pkt_type = PACKET_BROADCAST;
                }
#endif

                if (((flags&GRE_CSUM) && csum) ||
                    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
                        tunnel->dev->stats.rx_crc_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                if (tunnel->parms.i_flags&GRE_SEQ) {
                        if (!(flags&GRE_SEQ) ||
                            (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
                                tunnel->dev->stats.rx_fifo_errors++;
                                tunnel->dev->stats.rx_errors++;
                                goto drop;
                        }
                        tunnel->i_seqno = seqno + 1;
                }

                /* Warning: All skb pointers will be invalidated! */
                if (tunnel->dev->type == ARPHRD_ETHER) {
                        if (!pskb_may_pull(skb, ETH_HLEN)) {
                                tunnel->dev->stats.rx_length_errors++;
                                tunnel->dev->stats.rx_errors++;
                                goto drop;
                        }

                        iph = ip_hdr(skb);
                        skb->protocol = eth_type_trans(skb, tunnel->dev);
                        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                }

                __skb_tunnel_rx(skb, tunnel->dev);

                skb_reset_network_header(skb);
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
                                net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                                     &iph->saddr, iph->tos);
                        if (err > 1) {
                                ++tunnel->dev->stats.rx_frame_errors;
                                ++tunnel->dev->stats.rx_errors;
                                goto drop;
                        }
                }

                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);

                gro_cells_receive(&tunnel->gro_cells, skb);

                return 0;
        }
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
        int err;

        if (skb_is_gso(skb)) {
                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        goto error;
                skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
                return skb;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
                   tunnel->parms.o_flags&GRE_CSUM) {
                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                skb->ip_summed = CHECKSUM_NONE;

        return skb;

error:
        kfree_skb(skb);
        return ERR_PTR(err);
}
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *old_iph;
        const struct iphdr *tiph;
        struct flowi4 fl4;
        u8     tos;
        __be16 df;
        struct rtable *rt;              /* Route to the other host */
        struct net_device *tdev;        /* Device to the other host */
        struct iphdr *iph;              /* Our new IP header */
        unsigned int max_headroom;      /* The extra header space needed */
        int    gre_hlen;
        __be32 dst;
        int    mtu;

        skb = handle_offloads(tunnel, skb);
        if (IS_ERR(skb)) {
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        if (!skb->encapsulation) {
                skb_reset_inner_headers(skb);
                skb->encapsulation = 1;
        }

        old_iph = ip_hdr(skb);

        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;

        if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
                gre_hlen = 0;
                if (skb->protocol == htons(ETH_P_IP))
                        tiph = (const struct iphdr *)skb->data;
                else
                        tiph = &tunnel->parms.iph;
        } else {
                gre_hlen = tunnel->hlen;
                tiph = &tunnel->parms.iph;
        }

        if ((dst = tiph->daddr) == 0) {
                /* NBMA tunnel */

                if (skb_dst(skb) == NULL) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, old_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
                        if (neigh == NULL)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;
        }

        tos = tiph->tos;
        if (tos == 1) {
                tos = 0;
                if (skb->protocol == htons(ETH_P_IP))
                        tos = old_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
        }

        rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
                                 tunnel->parms.o_key, RT_TOS(tos),
                                 tunnel->parms.link);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        tdev = rt->dst.dev;

        if (tdev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        df = tiph->frag_off;
        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                df |= (old_iph->frag_off&htons(IP_DF));

                if (!skb_is_gso(skb) &&
                    (old_iph->frag_off&htons(IP_DF)) &&
                    mtu < ntohs(old_iph->tot_len)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        ip_rt_put(rt);
                        goto tx_error;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) &&
                    mtu >= IPV6_MIN_MTU &&
                    mtu < skb->len - tunnel->hlen + gre_hlen) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        ip_rt_put(rt);
                        goto tx_error;
                }
        }
#endif

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

        if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
                if (max_headroom > dev->needed_headroom)
                        dev->needed_headroom = max_headroom;
                if (!new_skb) {
                        ip_rt_put(rt);
                        dev->stats.tx_dropped++;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
                if (skb->sk)
                        skb_set_owner_w(new_skb, skb->sk);
                dev_kfree_skb(skb);
                skb = new_skb;
                old_iph = ip_hdr(skb);
                /* Warning : tiph value might point to freed memory */
        }

        skb_push(skb, gre_hlen);
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, sizeof(*iph));
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);

        /*
         *	Push down and install the IPIP header.
         */

        iph			= ip_hdr(skb);
        iph->version		= 4;
        iph->ihl		= sizeof(struct iphdr) >> 2;
        iph->frag_off		= df;
        iph->protocol		= IPPROTO_GRE;
        iph->tos		= ipgre_ecn_encapsulate(tos, old_iph, skb);
        iph->daddr		= fl4.daddr;
        iph->saddr		= fl4.saddr;

        tunnel_ip_select_ident(skb, old_iph, &rt->dst);

        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
                else
                        iph->ttl = ip4_dst_hoplimit(&rt->dst);
        }

        ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
        ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
                                   htons(ETH_P_TEB) : skb->protocol;

        if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
                __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

                if (tunnel->parms.o_flags&GRE_SEQ) {
                        ++tunnel->o_seqno;
                        *ptr = htonl(tunnel->o_seqno);
                        ptr--;
                }
                if (tunnel->parms.o_flags&GRE_KEY) {
                        *ptr = tunnel->parms.o_key;
                        ptr--;
                }
                /* Skip the GRE checksum if the skb is getting offloaded. */
                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
                    (tunnel->parms.o_flags&GRE_CSUM)) {
                        int offset = skb_transport_offset(skb);

                        if (skb_has_shared_frag(skb)) {
                                int err;

                                err = __skb_linearize(skb);
                                if (err)
                                        goto tx_error;
                        }

                        *ptr = 0;
                        *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
                                                                 skb->len - offset,
                                                                 0));
                }
        }

        iptunnel_xmit(skb, dev);
        return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int addend = sizeof(struct iphdr) + 4;

        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */

        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_gre(dev_net(dev), &fl4,
                                         iph->daddr, iph->saddr,
                                         tunnel->parms.o_key,
                                         RT_TOS(iph->tos),
                                         tunnel->parms.link);
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        ip_rt_put(rt);
                }

                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;

        /* Precalculate GRE options length */
        if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (tunnel->parms.o_flags&GRE_CSUM)
                        addend += 4;
                if (tunnel->parms.o_flags&GRE_KEY)
                        addend += 4;
                if (tunnel->parms.o_flags&GRE_SEQ)
                        addend += 4;
        }
        dev->needed_headroom = addend + hlen;
        mtu -= dev->hard_header_len + addend;

        if (mtu < 68)
                mtu = 68;

        tunnel->hlen = addend;
        /* TCP offload with GRE SEQ is not supported. */
        if (!(tunnel->parms.o_flags & GRE_SEQ)) {
                dev->features		|= NETIF_F_GSO_SOFTWARE;
                dev->hw_features	|= NETIF_F_GSO_SOFTWARE;
        }

        return mtu;
}
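/* Worked example (added for clarity, not in the original source): for a
 * tunnel configured with checksums and a key (GRE_CSUM|GRE_KEY), the
 * precalculated per-packet overhead is
 *
 *	addend = sizeof(struct iphdr) + 4	(outer IP + basic GRE)
 *	       = 20 + 4
 *	       + 4 (checksum word) + 4 (key word) = 32 bytes,
 *
 * so with an underlying 1500-byte MTU and no link-layer header on the
 * tunnel device itself, the advertised tunnel mtu becomes
 * 1500 - 32 = 1468 bytes.
 */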
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);

        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = ipgre_tunnel_locate(net, &p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(&p, &t->parms, sizeof(p));
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                err = -EINVAL;
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
                    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
                    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
                        goto done;
                if (p.iph.ttl)
                        p.iph.frag_off |= htons(IP_DF);

                if (!(p.i_flags&GRE_KEY))
                        p.i_key = 0;
                if (!(p.o_flags&GRE_KEY))
                        p.o_key = 0;

                t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

                if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                t = netdev_priv(dev);

                                if (ipv4_is_multicast(p.iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p.iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }
                                ipgre_tunnel_unlink(ign, t);
                                synchronize_net();
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
                                memcpy(dev->broadcast, &p.iph.daddr, 4);
                                ipgre_tunnel_link(ign, t);
                                netdev_state_change(dev);
                        }
                }

                if (t) {
                        err = 0;
                        if (cmd == SIOCCHGTUNNEL) {
                                t->parms.iph.ttl = p.iph.ttl;
                                t->parms.iph.tos = p.iph.tos;
                                t->parms.iph.frag_off = p.iph.frag_off;
                                if (t->parms.link != p.link) {
                                        t->parms.link = p.link;
                                        dev->mtu = ipgre_tunnel_bind_dev(dev);
                                        netdev_state_change(dev);
                                }
                        }
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == ign->fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                                goto done;
                        err = -ENOENT;
                        if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(ign->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        if (new_mtu < 68 ||
            new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp fec0:6666:6666::193.233.7.65
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
                        const void *daddr, const void *saddr, unsigned int len)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
        __be16 *p = (__be16 *)(iph+1);

        memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
        p[0]		= t->parms.o_flags;
        p[1]		= htons(type);

        /*
         *	Set the source hardware address.
         */

        if (saddr)
                memcpy(&iph->saddr, saddr, 4);
        if (daddr)
                memcpy(&iph->daddr, daddr, 4);
        if (iph->daddr)
                return t->hlen;

        return -t->hlen;
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
        const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
        memcpy(haddr, &iph->saddr, 4);
        return 4;
}

static const struct header_ops ipgre_header_ops = {
        .create	= ipgre_header,
        .parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr)) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_gre(dev_net(dev), &fl4,
                                         t->parms.iph.daddr,
                                         t->parms.iph.saddr,
                                         t->parms.o_key,
                                         RT_TOS(t->parms.iph.tos),
                                         t->parms.link);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (__in_dev_get_rtnl(dev) == NULL)
                        return -EADDRNOTAVAIL;
                t->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
        }
        return 0;
}

static int ipgre_close(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(dev_net(dev), t->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
        }
        return 0;
}

#endif
static const struct net_device_ops ipgre_netdev_ops = {
        .ndo_init		= ipgre_tunnel_init,
        .ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
        .ndo_open		= ipgre_open,
        .ndo_stop		= ipgre_close,
#endif
        .ndo_start_xmit		= ipgre_tunnel_xmit,
        .ndo_do_ioctl		= ipgre_tunnel_ioctl,
        .ndo_change_mtu		= ipgre_tunnel_change_mtu,
        .ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(dev->tstats);
        free_netdev(dev);
}
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops		= &ipgre_netdev_ops;
        dev->destructor		= ipgre_dev_free;

        dev->type		= ARPHRD_IPGRE;
        dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
        dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
        dev->flags		= IFF_NOARP;
        dev->iflink		= 0;
        dev->addr_len		= 4;
        dev->features		|= NETIF_F_NETNS_LOCAL;
        dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

        dev->features		|= GRE_FEATURES;
        dev->hw_features	|= GRE_FEATURES;
}
static int ipgre_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel;
        struct iphdr *iph;
        int err;

        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

        if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
                                return -EINVAL;
                        dev->flags = IFF_BROADCAST;
                        dev->header_ops = &ipgre_header_ops;
                }
#endif
        } else
                dev->header_ops = &ipgre_header_ops;

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                free_percpu(dev->tstats);
                return err;
        }

        return 0;
}
static void ipgre_fb_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        iph->version		= 4;
        iph->protocol		= IPPROTO_GRE;
        iph->ihl		= 5;
        tunnel->hlen		= sizeof(struct iphdr) + 4;

        dev_hold(dev);
}
static const struct gre_protocol ipgre_protocol = {
        .handler	= ipgre_rcv,
        .err_handler	= ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
        int prio;

        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
                        struct ip_tunnel *t;

                        t = rtnl_dereference(ign->tunnels[prio][h]);

                        while (t != NULL) {
                                unregister_netdevice_queue(t->dev, head);
                                t = rtnl_dereference(t->next);
                        }
                }
        }
}
static int __net_init ipgre_init_net(struct net *net)
{
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
        int err;

        ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
                                          ipgre_tunnel_setup);
        if (!ign->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
        }
        dev_net_set(ign->fb_tunnel_dev, net);

        ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
        ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

        if ((err = register_netdev(ign->fb_tunnel_dev)))
                goto err_reg_dev;

        rcu_assign_pointer(ign->tunnels_wc[0],
                           netdev_priv(ign->fb_tunnel_dev));
        return 0;

err_reg_dev:
        ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
        return err;
}
static void __net_exit ipgre_exit_net(struct net *net)
{
        struct ipgre_net *ign;
        LIST_HEAD(list);

        ign = net_generic(net, ipgre_net_id);
        rtnl_lock();
        ipgre_destroy_tunnels(ign, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
static struct pernet_operations ipgre_net_ops = {
        .init = ipgre_init_net,
        .exit = ipgre_exit_net,
        .id   = &ipgre_net_id,
        .size = sizeof(struct ipgre_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
        __be16 flags;

        if (!data)
                return 0;

        flags = 0;
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (flags & (GRE_VERSION|GRE_ROUTING))
                return -EINVAL;

        return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
        __be32 daddr;

        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }

        if (!data)
                goto out;

        if (data[IFLA_GRE_REMOTE]) {
                memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
                if (!daddr)
                        return -EINVAL;
        }

out:
        return ipgre_tunnel_validate(tb, data);
}
static void ipgre_netlink_parms(struct nlattr *data[],
                                struct ip_tunnel_parm *parms)
{
        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_GRE;

        if (!data)
                return;

        if (data[IFLA_GRE_LINK])
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

        if (data[IFLA_GRE_IFLAGS])
                parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

        if (data[IFLA_GRE_OFLAGS])
                parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

        if (data[IFLA_GRE_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

        if (data[IFLA_GRE_LOCAL])
                parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

        if (data[IFLA_GRE_REMOTE])
                parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

        if (data[IFLA_GRE_TTL])
                parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

        if (data[IFLA_GRE_TOS])
                parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

        if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
                parms->iph.frag_off = htons(IP_DF);
}
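/* Usage sketch (added for clarity; the iproute2 spelling below is the
 * usual one, not taken from this file): a netlink request such as
 *
 *	ip link add gre1 type gre local 192.0.2.1 remote 198.51.100.2 \
 *		ttl 64 key 1234
 *
 * arrives here as IFLA_GRE_LOCAL/IFLA_GRE_REMOTE/IFLA_GRE_TTL plus the
 * key attributes, and path-MTU discovery stays enabled (frag_off set
 * to IP_DF) unless IFLA_GRE_PMTUDISC explicitly turns it off.
 */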
static int ipgre_tap_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel;

        tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        ipgre_tunnel_bind_dev(dev);

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}
static const struct net_device_ops ipgre_tap_netdev_ops = {
        .ndo_init		= ipgre_tap_init,
        .ndo_uninit		= ipgre_tunnel_uninit,
        .ndo_start_xmit		= ipgre_tunnel_xmit,
        .ndo_set_mac_address	= eth_mac_addr,
        .ndo_validate_addr	= eth_validate_addr,
        .ndo_change_mtu		= ipgre_tunnel_change_mtu,
        .ndo_get_stats64	= ipgre_get_stats64,
};
static void ipgre_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->netdev_ops		= &ipgre_tap_netdev_ops;
        dev->destructor		= ipgre_dev_free;

        dev->iflink		= 0;
        dev->features		|= NETIF_F_NETNS_LOCAL;

        dev->features		|= GRE_FEATURES;
        dev->hw_features	|= GRE_FEATURES;
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
                         struct nlattr *data[])
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
        int mtu;
        int err;

        nt = netdev_priv(dev);
        ipgre_netlink_parms(data, &nt->parms);

        if (ipgre_tunnel_find(net, &nt->parms, dev->type))
                return -EEXIST;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ipgre_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & GRE_SEQ))
                dev->features |= NETIF_F_LLTX;

        err = register_netdevice(dev);
        if (err)
                goto out;

        dev_hold(dev);
        ipgre_tunnel_link(ign, nt);

out:
        return err;
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[])
{
        struct ip_tunnel *t, *nt;
        struct net *net = dev_net(dev);
        struct ipgre_net *ign = net_generic(net, ipgre_net_id);
        struct ip_tunnel_parm p;
        int mtu;

        if (dev == ign->fb_tunnel_dev)
                return -EINVAL;

        nt = netdev_priv(dev);
        ipgre_netlink_parms(data, &p);

        t = ipgre_tunnel_locate(net, &p, 0);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = nt;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p.iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p.iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }

                ipgre_tunnel_unlink(ign, t);
                t->parms.iph.saddr = p.iph.saddr;
                t->parms.iph.daddr = p.iph.daddr;
                t->parms.i_key = p.i_key;
                if (dev->type != ARPHRD_ETHER) {
                        memcpy(dev->dev_addr, &p.iph.saddr, 4);
                        memcpy(dev->broadcast, &p.iph.daddr, 4);
                }
                ipgre_tunnel_link(ign, t);
                netdev_state_change(dev);
        }

        t->parms.o_key = p.o_key;
        t->parms.iph.ttl = p.iph.ttl;
        t->parms.iph.tos = p.iph.tos;
        t->parms.iph.frag_off = p.iph.frag_off;

        if (t->parms.link != p.link) {
                t->parms.link = p.link;
                mtu = ipgre_tunnel_bind_dev(dev);
                if (!tb[IFLA_MTU])
                        dev->mtu = mtu;
                netdev_state_change(dev);
        }

        return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(4) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(4) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_TOS */
                nla_total_size(1) +
                /* IFLA_GRE_PMTUDISC */
                nla_total_size(1) +
                0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
            nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
            nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
            nla_put_u8(skb, IFLA_GRE_PMTUDISC,
                       !!(p->iph.frag_off & htons(IP_DF))))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]		= { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
        [IFLA_GRE_IKEY]		= { .type = NLA_U32 },
        [IFLA_GRE_OKEY]		= { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_GRE_TTL]		= { .type = NLA_U8 },
        [IFLA_GRE_TOS]		= { .type = NLA_U8 },
        [IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
        .kind		= "gre",
        .maxtype	= IFLA_GRE_MAX,
        .policy		= ipgre_policy,
        .priv_size	= sizeof(struct ip_tunnel),
        .setup		= ipgre_tunnel_setup,
        .validate	= ipgre_tunnel_validate,
        .newlink	= ipgre_newlink,
        .changelink	= ipgre_changelink,
        .get_size	= ipgre_get_size,
        .fill_info	= ipgre_fill_info,
};
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
        .kind		= "gretap",
        .maxtype	= IFLA_GRE_MAX,
        .policy		= ipgre_policy,
        .priv_size	= sizeof(struct ip_tunnel),
        .setup		= ipgre_tap_setup,
        .validate	= ipgre_tap_validate,
        .newlink	= ipgre_newlink,
        .changelink	= ipgre_changelink,
        .get_size	= ipgre_get_size,
        .fill_info	= ipgre_fill_info,
};
/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
        int err;

        pr_info("GRE over IPv4 tunneling driver\n");

        err = register_pernet_device(&ipgre_net_ops);
        if (err < 0)
                return err;

        err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ipgre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ipgre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

out:
        return err;

tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
        gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
        unregister_pernet_device(&ipgre_net_ops);
        goto out;
}
static void __exit ipgre_fini(void)
{
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
        if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        unregister_pernet_device(&ipgre_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");