/*
 *	Linux NET3: GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
   (See the illustrative sketch below this comment.)

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the outer header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, those in my
     neighbourhood) return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our own node. It is difficult or even
   impossible, especially taking fragmentation into account. In short,
   ttl is not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem completely,
   but the exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulated packets have
   DF set. But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
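
/*
 * For illustration only, a sketch rather than code from this file: the
 * xmit_recursion guard mentioned above lives in the core transmit path
 * (net/core/dev.c). In kernels of this vintage it amounts to roughly
 * the following; names are from memory and may not match the exact
 * source:
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT 10
 *
 *	// in __dev_queue_xmit(), with preemption disabled:
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
 *		goto recursion_alert;	// drop: local tunnel dead loop
 *	__this_cpu_inc(xmit_recursion);
 *	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 *	__this_cpu_dec(xmit_recursion);
 *
 * A tunnel whose encapsulated output is routed back into itself
 * re-enters this path on the same cpu and is cut off after
 * RECURSION_LIMIT nested transmissions.
 */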

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco's "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled.
	   Tell them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves???
	   */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco's "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves???
	 */

	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len)) {
		if (!csum_err)	/* a csum error alone is not fatal here */
			return;
	}

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static __be64 key_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)((__force u32)key);
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static __be32 tunnel_id_to_key(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}
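
/*
 * A brief gloss on the two conversions above (an editorial note, not
 * upstream text): both branches produce the same wire layout. The
 * 32-bit GRE key always lands in the least-significant (network-order)
 * half of the 64-bit tunnel id; e.g. a key of htonl(1) maps to the
 * tunnel id whose eight bytes on the wire are 00 00 00 00 00 00 00 01,
 * and tunnel_id_to_key() inverts that mapping. The #ifdefs differ only
 * because the shift has to be done in host byte order.
 */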

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		skb_pop_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static __sum16 gre_checksum(struct sk_buff *skb)
{
	__wsum csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csum = lco_csum(skb);
	else
		csum = skb_checksum(skb, 0, skb->len, 0);
	return csum_fold(csum);
}
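
/*
 * Aside (an editorial gloss; see Documentation/networking/
 * checksum-offloads.txt for the authoritative description): when the
 * inner checksum is still pending (CHECKSUM_PARTIAL, to be filled in
 * later by the device), lco_csum() derives the outer GRE checksum from
 * the inner packet's stored pseudo-header checksum plus the intervening
 * headers, without walking the payload ("local checksum offload").
 * This works because once the inner checksum field is written, the
 * checksummed region sums to the complement of the pseudo-header sum.
 * Only fully resolved packets need the complete skb_checksum() walk.
 */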

static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
			 __be16 proto, __be32 key, __be32 seq)
{
	struct gre_base_hdr *greh;

	skb_push(skb, hdr_len);

	skb_reset_transport_header(skb);
	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = gre_tnl_flags_to_gre_flags(flags);
	greh->protocol = proto;

	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

		if (flags & TUNNEL_SEQ) {
			*ptr = seq;
			ptr--;
		}
		if (flags & TUNNEL_KEY) {
			*ptr = key;
			ptr--;
		}
		if (flags & TUNNEL_CSUM &&
		    !(skb_shinfo(skb)->gso_type &
		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
			*ptr = 0;
			*(__sum16 *)ptr = gre_checksum(skb);
		}
	}
}
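
/*
 * For reference, the header just built follows RFC 2784/2890; each
 * optional 32-bit word is present only when its flag is set, which is
 * why build_header() starts at the last word (hdr_len - 4) and works
 * backwards:
 *
 *	|C| |K|S|  Reserved0  | Ver |        Protocol Type        |
 *	|    Checksum (optional)    |    Reserved1 (optional)     |
 *	|                     Key (optional)                      |
 *	|              Sequence Number (optional)                 |
 */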

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));

	skb_set_inner_protocol(skb, proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
		     tunnel_id_to_key(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			return -EINVAL;
	}
	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create = ipgre_header,
	.parse = ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init = ipgre_tunnel_init,
	.ndo_uninit = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open = ipgre_open,
	.ndo_stop = ipgre_close,
#endif
	.ndo_start_xmit = ipgre_xmit,
	.ndo_do_ioctl = ipgre_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
};

#define GRE_FEATURES (NETIF_F_SG | \
		      NETIF_F_FRAGLIST | \
		      NETIF_F_HIGHDMA | \
		      NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
	dev->mtu = ETH_DATA_LEN - t_hlen - 4;

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->collect_md = true;
	}
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init = gre_tap_init,
	.ndo_uninit = ip_tunnel_uninit,
	.ndo_start_xmit = gre_tap_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst = gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_TOS] = { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind = "gre",
	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tunnel_setup,
	.validate = ipgre_tunnel_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.dellink = ip_tunnel_dellink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
	.get_link_net = ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind = "gretap",
	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tap_setup,
	.validate = ipgre_tap_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.dellink = ip_tunnel_dellink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
	.get_link_net = ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0)
		goto out;

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
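
/*
 * For illustration only, a sketch rather than code from this file: an
 * external caller such as openvswitch's GRE vport creates its
 * flow-based device roughly along these lines, under rtnl_lock (the
 * "gre_sys" name here is a hypothetical example; the real caller
 * supplies its own):
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gre_sys", NET_NAME_USER);
 *	if (IS_ERR(dev)) {
 *		rtnl_unlock();
 *		return ERR_CAST(dev);
 *	}
 *	// ... bring the device up, then ...
 *	rtnl_unlock();
 *
 * The returned device has collect_md set, so every transmitted skb is
 * expected to carry its own tunnel key and addresses via
 * skb_tunnel_info().
 */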

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");