/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   that is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), we force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would
     be even more informative. This idea appeared to be wrong: only
     Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my
     neighbourhood) return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us:
   we did all that we could. Even if it was your gated that injected
   the fatal route to the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them together,
   but it is not very evident how to make them modular. sit is an
   integral part of IPv6, while ipip and gre are naturally modular.
   We could extract the common parts (hash table, ioctl etc)
   to a separate module (ip_tunnel.c).

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
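
/* Example (illustrative): with 0644 permissions this knob is also
 * writable at runtime through sysfs, assuming the module is built
 * as "ip_gre":
 *
 *	echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 *	echo 1 > /sys/module/ip_gre/parameters/log_ecn_error
 */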

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE 16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
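
/* Worked example (illustrative, values are hypothetical): a receive
 * lookup for remote R with key K first scans
 *
 *	ign->tunnels_r_l[HASH(R) ^ HASH(K)]
 *
 * while a keyless wildcard tunnel lives in tunnels_wc[HASH(0)], i.e.
 * bucket 0. HASH() xors the two low nibbles of the value and masks
 * the result to 4 bits, so every index is < HASH_SIZE.
 */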

static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		/* Snapshot the 64-bit counters; retry if a writer updated
		 * them while we were reading.
		 */
		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}

/* Does the key in the tunnel parameters match the packet? */
static bool ipgre_key_match(const struct ip_tunnel_parm *p,
			    __be16 flags, __be32 key)
{
	if (p->i_flags & GRE_KEY) {
		if (flags & GRE_KEY)
			return key == p->i_key;
		else
			return false;	/* key expected, none present */
	} else
		return !(flags & GRE_KEY);
}
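
/* Illustrative truth table for the helper above (K1 != K2):
 *
 *	tunnel i_flags		packet			result
 *	GRE_KEY, i_key=K1	GRE_KEY, key=K1		match
 *	GRE_KEY, i_key=K1	GRE_KEY, key=K2		no match
 *	GRE_KEY			no GRE_KEY		no match
 *	no GRE_KEY		GRE_KEY			no match
 *	no GRE_KEY		no GRE_KEY		match
 */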

/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be16 flags, __be32 key,
					     __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ipgre_key_match(&t->parms, flags, key))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
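
/* Informal sketch of the precedence implemented above: the tables are
 * scanned from most to least specific, (remote,local) -> (remote,*) ->
 * (*,local) -> (*,*); inside a table an entry that also matches the
 * arrival link and device type (score 0) wins immediately, otherwise
 * the lowest-scoring candidate seen anywhere is kept, and an IFF_UP
 * fallback device (gre0) catches whatever remains.
 */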

static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}

static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}

static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key in the third word
   of the GRE header. That makes it impossible to maintain even soft
   state for keyed GRE tunnels with enabled checksum. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so why
   the hell do these idiots break the standards established by
   themselves???
 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);

	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
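
/* Example (illustrative): with a tunnel tos of 0x00 and an inner IPv4
 * packet carrying ECT(0) (tos 0x02), INET_ECN_encapsulate() yields an
 * outer tos of 0x02, so a CE mark applied to the outer header in
 * transit can be propagated back into the inner packet by
 * IP_ECN_decapsulate() at the receiver.
 */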

static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8 *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32 seqno = 0;
	struct ip_tunnel *tunnel;
	int offset = 4;
	__be16 gre_proto;
	int err;

	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
	int err;

	if (skb_is_gso(skb)) {
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			goto error;
		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
		return skb;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		   tunnel->parms.o_flags&GRE_CSUM) {
		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return skb;

error:
	kfree_skb(skb);
	return ERR_PTR(err);
}
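
/* Illustrative summary of the helper above:
 *
 *	GSO skb                   -> tagged SKB_GSO_GRE; segmentation and
 *	                             checksums are finished by the GSO layer
 *	CHECKSUM_PARTIAL + o_csum -> inner checksum resolved now, so the
 *	                             GRE checksum computed in xmit is final
 *	everything else           -> ip_summed forced to CHECKSUM_NONE
 */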

static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *old_iph;
	const struct iphdr *tiph;
	struct flowi4 fl4;
	u8 tos;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int gre_hlen;
	__be32 dst;
	int mtu;
	u8 ttl;
	int err;

	skb = handle_offloads(tunnel, skb);
	if (IS_ERR(skb)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	old_iph = ip_hdr(skb);

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tiph = (const struct iphdr *)skb->data;
		else
			tiph = &tunnel->parms.iph;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, old_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;
	}

	ttl = tiph->ttl;
	tos = tiph->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if (!skb_is_gso(skb) &&
		    (old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) &&
		    mtu >= IPV6_MIN_MTU &&
		    mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
		/* Warning: tiph value might point to freed memory */
	}

	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(*iph));
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the outer IP header.
	 */

	iph		= ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_GRE;
	iph->tos	= ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr	= fl4.daddr;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl;

	tunnel_ip_select_ident(skb, old_iph, &rt->dst);

	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		/* Skip the GRE checksum if the skb is getting offloaded. */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
		    (tunnel->parms.o_flags&GRE_CSUM)) {
			int offset = skb_transport_offset(skb);

			if (skb_has_shared_frag(skb)) {
				err = __skb_linearize(skb);
				if (err)
					goto tx_error;
			}

			*ptr = 0;
			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
								 skb->len - offset,
								 0));
		}
	}

	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;
	/* TCP offload with GRE SEQ is not supported. */
	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}

	return mtu;
}
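
/* Worked example (illustrative): for a tunnel with GRE_KEY|GRE_CSUM
 * whose route resolves to an underlay device with mtu 1500, on a
 * device with no link-layer header of its own:
 *
 *	addend = sizeof(struct iphdr) + 4 + 4 + 4 = 32
 *	mtu    = 1500 - 0 - 32 = 1468
 *
 * which is the value the callers assign to dev->mtu.
 */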

static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
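
/* Example bound (illustrative): for a plain ARPHRD_IPGRE tunnel
 * (hard_header_len = 0, hlen = 24) the largest accepted MTU is
 * 0xFFF8 - 0 - 24 = 65504; anything below 68 is rejected.
 */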

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so that I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...

 */
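
/* The same setup with modern iproute2 commands (illustrative
 * equivalent of the ifconfig invocations above):
 *
 *	ip link set Universe up
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */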

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};

static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}

#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int err;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};

static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}

static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};

static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
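
/* These rtnl_link_ops are what the "ip link" front end drives; an
 * illustrative invocation that exercises ipgre_newlink() for both
 * kinds (addresses and keys are hypothetical):
 *
 *	ip link add gre1 type gre remote 192.0.2.1 local 192.0.2.2 \
 *		ikey 1 okey 2 ttl 64
 *	ip link add tap1 type gretap remote 192.0.2.1 local 192.0.2.2
 */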

/*
 *	And now the module code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	goto out;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");